2024-12-11 02:25:59,612 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-11 02:25:59,628 main DEBUG Took 0.013404 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-11 02:25:59,628 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-11 02:25:59,629 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-11 02:25:59,629 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-11 02:25:59,631 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,639 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-11 02:25:59,654 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,655 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,656 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,656 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,657 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,657 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,659 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,659 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,659 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,660 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,661 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,661 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,662 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,662 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-11 02:25:59,663 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,663 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,663 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,664 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,665 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,665 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,665 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,666 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 02:25:59,666 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,666 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-11 02:25:59,668 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 02:25:59,669 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-11 02:25:59,671 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-11 02:25:59,671 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-11 02:25:59,672 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-11 02:25:59,672 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-11 02:25:59,681 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-11 02:25:59,683 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-11 02:25:59,685 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-11 02:25:59,685 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-11 02:25:59,686 main DEBUG createAppenders(={Console}) 2024-12-11 02:25:59,686 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-11 02:25:59,687 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-11 02:25:59,687 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-11 02:25:59,687 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-11 02:25:59,688 main DEBUG OutputStream closed 2024-12-11 02:25:59,688 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-11 02:25:59,688 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-11 02:25:59,688 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-11 02:25:59,755 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-11 02:25:59,758 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-11 02:25:59,759 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-11 02:25:59,759 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-11 02:25:59,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-11 02:25:59,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-11 02:25:59,760 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-11 02:25:59,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-11 02:25:59,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-11 02:25:59,761 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-11 02:25:59,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-11 02:25:59,762 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-11 02:25:59,762 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-11 02:25:59,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-11 02:25:59,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-11 02:25:59,763 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-11 02:25:59,764 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-11 02:25:59,765 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-11 02:25:59,767 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11 02:25:59,767 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-11 02:25:59,768 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-11 02:25:59,768 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-11T02:26:00,067 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7 2024-12-11 02:26:00,070 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-11 02:26:00,071 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-11T02:26:00,079 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-11T02:26:00,098 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T02:26:00,101 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3, deleteOnExit=true 2024-12-11T02:26:00,101 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-11T02:26:00,102 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/test.cache.data in system properties and HBase conf 2024-12-11T02:26:00,102 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T02:26:00,103 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/hadoop.log.dir in system properties and HBase conf 2024-12-11T02:26:00,103 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T02:26:00,104 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T02:26:00,104 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-11T02:26:00,196 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-11T02:26:00,289 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T02:26:00,293 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T02:26:00,293 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T02:26:00,294 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T02:26:00,294 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T02:26:00,294 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T02:26:00,295 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T02:26:00,295 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T02:26:00,295 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T02:26:00,296 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T02:26:00,296 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/nfs.dump.dir in system properties and HBase conf 2024-12-11T02:26:00,296 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/java.io.tmpdir in system properties and HBase conf 2024-12-11T02:26:00,297 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T02:26:00,297 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T02:26:00,298 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T02:26:01,150 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-11T02:26:01,235 INFO [Time-limited test {}] log.Log(170): Logging initialized @2367ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-11T02:26:01,317 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T02:26:01,388 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T02:26:01,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T02:26:01,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T02:26:01,413 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T02:26:01,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T02:26:01,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/hadoop.log.dir/,AVAILABLE} 2024-12-11T02:26:01,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T02:26:01,679 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/java.io.tmpdir/jetty-localhost-41263-hadoop-hdfs-3_4_1-tests_jar-_-any-13207789086317554842/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T02:26:01,686 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:41263} 2024-12-11T02:26:01,687 INFO [Time-limited test {}] server.Server(415): Started @2820ms 2024-12-11T02:26:02,117 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T02:26:02,125 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T02:26:02,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T02:26:02,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T02:26:02,127 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T02:26:02,127 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/hadoop.log.dir/,AVAILABLE} 2024-12-11T02:26:02,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T02:26:02,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/java.io.tmpdir/jetty-localhost-44633-hadoop-hdfs-3_4_1-tests_jar-_-any-12222739051907631287/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T02:26:02,252 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:44633} 2024-12-11T02:26:02,252 INFO [Time-limited test {}] server.Server(415): Started @3385ms 2024-12-11T02:26:02,309 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T02:26:02,803 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3/dfs/data/data1/current/BP-442462323-172.17.0.2-1733883960905/current, will proceed with Du for space computation calculation, 2024-12-11T02:26:02,803 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3/dfs/data/data2/current/BP-442462323-172.17.0.2-1733883960905/current, will proceed with Du for space computation calculation, 2024-12-11T02:26:02,854 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T02:26:02,921 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4395d89e153670d with lease ID 0x9728125417232a1a: Processing first storage report for DS-3236bb8d-c196-43c6-8235-42fd3971ebe6 from datanode DatanodeRegistration(127.0.0.1:46759, datanodeUuid=c67fabea-812a-4cd3-8643-47d8f6db4f2d, infoPort=45779, infoSecurePort=0, ipcPort=36609, storageInfo=lv=-57;cid=testClusterID;nsid=551371732;c=1733883960905) 2024-12-11T02:26:02,922 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4395d89e153670d with lease ID 0x9728125417232a1a: from storage DS-3236bb8d-c196-43c6-8235-42fd3971ebe6 node DatanodeRegistration(127.0.0.1:46759, datanodeUuid=c67fabea-812a-4cd3-8643-47d8f6db4f2d, infoPort=45779, infoSecurePort=0, ipcPort=36609, storageInfo=lv=-57;cid=testClusterID;nsid=551371732;c=1733883960905), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-11T02:26:02,923 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4395d89e153670d with lease ID 0x9728125417232a1a: Processing first storage report for DS-fb74f7a1-13dd-4285-85bb-d2592013e3be from datanode DatanodeRegistration(127.0.0.1:46759, datanodeUuid=c67fabea-812a-4cd3-8643-47d8f6db4f2d, infoPort=45779, infoSecurePort=0, ipcPort=36609, storageInfo=lv=-57;cid=testClusterID;nsid=551371732;c=1733883960905) 2024-12-11T02:26:02,923 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4395d89e153670d with lease ID 0x9728125417232a1a: from storage DS-fb74f7a1-13dd-4285-85bb-d2592013e3be node DatanodeRegistration(127.0.0.1:46759, datanodeUuid=c67fabea-812a-4cd3-8643-47d8f6db4f2d, infoPort=45779, infoSecurePort=0, ipcPort=36609, storageInfo=lv=-57;cid=testClusterID;nsid=551371732;c=1733883960905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T02:26:02,934 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7 
2024-12-11T02:26:03,022 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3/zookeeper_0, clientPort=63149, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T02:26:03,033 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=63149 2024-12-11T02:26:03,045 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T02:26:03,048 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T02:26:03,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741825_1001 (size=7) 2024-12-11T02:26:03,706 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 with version=8 2024-12-11T02:26:03,706 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/hbase-staging 2024-12-11T02:26:03,843 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-11T02:26:04,126 INFO [Time-limited test {}] client.ConnectionUtils(129): master/5f57a24c5131:0 server-side Connection retries=45 2024-12-11T02:26:04,147 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T02:26:04,147 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T02:26:04,147 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T02:26:04,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T02:26:04,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T02:26:04,294 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T02:26:04,355 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-11T02:26:04,363 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-11T02:26:04,367 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T02:26:04,395 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 12205 (auto-detected) 2024-12-11T02:26:04,396 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-11T02:26:04,416 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40407 2024-12-11T02:26:04,425 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T02:26:04,427 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T02:26:04,441 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:40407 connecting to ZooKeeper ensemble=127.0.0.1:63149 2024-12-11T02:26:04,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404070x0, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T02:26:04,478 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40407-0x1007ee55f5b0000 connected 2024-12-11T02:26:04,512 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T02:26:04,515 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T02:26:04,518 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T02:26:04,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40407 2024-12-11T02:26:04,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40407 2024-12-11T02:26:04,523 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40407 2024-12-11T02:26:04,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40407 2024-12-11T02:26:04,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40407 
2024-12-11T02:26:04,533 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6, hbase.cluster.distributed=false 2024-12-11T02:26:04,602 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/5f57a24c5131:0 server-side Connection retries=45 2024-12-11T02:26:04,602 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T02:26:04,602 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T02:26:04,602 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T02:26:04,602 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T02:26:04,603 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T02:26:04,605 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T02:26:04,607 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T02:26:04,608 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40311 2024-12-11T02:26:04,610 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T02:26:04,616 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T02:26:04,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T02:26:04,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T02:26:04,623 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:40311 connecting to ZooKeeper ensemble=127.0.0.1:63149 2024-12-11T02:26:04,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:403110x0, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T02:26:04,628 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40311-0x1007ee55f5b0001 connected 2024-12-11T02:26:04,629 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T02:26:04,630 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40311-0x1007ee55f5b0001, 
quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T02:26:04,631 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T02:26:04,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40311 2024-12-11T02:26:04,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40311 2024-12-11T02:26:04,636 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40311 2024-12-11T02:26:04,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40311 2024-12-11T02:26:04,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40311 2024-12-11T02:26:04,639 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/5f57a24c5131,40407,1733883963836 2024-12-11T02:26:04,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T02:26:04,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T02:26:04,649 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5f57a24c5131,40407,1733883963836 2024-12-11T02:26:04,657 DEBUG [M:0;5f57a24c5131:40407 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5f57a24c5131:40407 2024-12-11T02:26:04,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T02:26:04,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T02:26:04,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:04,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:04,672 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T02:26:04,673 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T02:26:04,673 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5f57a24c5131,40407,1733883963836 from backup master directory 2024-12-11T02:26:04,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5f57a24c5131,40407,1733883963836 2024-12-11T02:26:04,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T02:26:04,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T02:26:04,678 WARN [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T02:26:04,678 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5f57a24c5131,40407,1733883963836 2024-12-11T02:26:04,680 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-11T02:26:04,682 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-11T02:26:04,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741826_1002 (size=42) 2024-12-11T02:26:05,154 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/hbase.id with ID: fe164e1a-2940-4c81-a9a7-82eb45c899c7 2024-12-11T02:26:05,196 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T02:26:05,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:05,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:05,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741827_1003 (size=196) 2024-12-11T02:26:05,259 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:26:05,261 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T02:26:05,279 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:05,283 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T02:26:05,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741828_1004 (size=1189) 2024-12-11T02:26:05,733 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store 2024-12-11T02:26:05,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741829_1005 (size=34) 2024-12-11T02:26:06,155 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-11T02:26:06,156 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:06,157 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T02:26:06,157 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T02:26:06,157 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T02:26:06,158 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T02:26:06,158 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T02:26:06,158 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T02:26:06,158 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-11T02:26:06,160 WARN [master/5f57a24c5131:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/.initializing 2024-12-11T02:26:06,160 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/WALs/5f57a24c5131,40407,1733883963836 2024-12-11T02:26:06,167 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T02:26:06,179 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5f57a24c5131%2C40407%2C1733883963836, suffix=, logDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/WALs/5f57a24c5131,40407,1733883963836, archiveDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/oldWALs, maxLogs=10 2024-12-11T02:26:06,202 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/WALs/5f57a24c5131,40407,1733883963836/5f57a24c5131%2C40407%2C1733883963836.1733883966183, exclude list is [], retry=0 2024-12-11T02:26:06,219 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46759,DS-3236bb8d-c196-43c6-8235-42fd3971ebe6,DISK] 2024-12-11T02:26:06,222 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-11T02:26:06,259 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/WALs/5f57a24c5131,40407,1733883963836/5f57a24c5131%2C40407%2C1733883963836.1733883966183 2024-12-11T02:26:06,260 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45779:45779)] 2024-12-11T02:26:06,260 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:26:06,261 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:06,264 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T02:26:06,265 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T02:26:06,303 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T02:26:06,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T02:26:06,331 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:06,333 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T02:26:06,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T02:26:06,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T02:26:06,338 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:06,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:06,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T02:26:06,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T02:26:06,342 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:06,343 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:06,343 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T02:26:06,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T02:26:06,347 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:06,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:06,351 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T02:26:06,352 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T02:26:06,361 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T02:26:06,364 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T02:26:06,369 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T02:26:06,370 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71994091, jitterRate=0.0727955549955368}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T02:26:06,375 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-11T02:26:06,376 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T02:26:06,407 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dd3d717, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:06,445 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
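The FlushLargeStoresPolicy line above falls back to region.getMemStoreFlushHeapSize divided by the number of families (32.0 M here) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the master:store table descriptor. A minimal sketch of setting that bound explicitly through the client Configuration follows; the 16 MB value and the class name FlushLowerBoundSketch are illustrative assumptions, not values taken from this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Standard HBase client configuration (reads hbase-site.xml from the classpath).
    Configuration conf = HBaseConfiguration.create();

    // Explicit per-column-family flush lower bound; when absent, HBase uses
    // memstore flush heap size / number of families, as logged above.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);

    System.out.println(conf.get("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}
```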
2024-12-11T02:26:06,458 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T02:26:06,458 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T02:26:06,461 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T02:26:06,462 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T02:26:06,468 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-11T02:26:06,468 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T02:26:06,496 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T02:26:06,510 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T02:26:06,514 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-11T02:26:06,517 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T02:26:06,518 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T02:26:06,520 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-11T02:26:06,522 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T02:26:06,525 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T02:26:06,527 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-11T02:26:06,528 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T02:26:06,530 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T02:26:06,542 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T02:26:06,543 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T02:26:06,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T02:26:06,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T02:26:06,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:06,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:06,549 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=5f57a24c5131,40407,1733883963836, sessionid=0x1007ee55f5b0000, setting cluster-up flag (Was=false) 2024-12-11T02:26:06,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:06,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:06,568 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T02:26:06,569 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5f57a24c5131,40407,1733883963836 2024-12-11T02:26:06,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:06,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:06,580 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T02:26:06,582 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5f57a24c5131,40407,1733883963836 2024-12-11T02:26:06,656 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5f57a24c5131:40311 2024-12-11T02:26:06,658 INFO 
[RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1008): ClusterId : fe164e1a-2940-4c81-a9a7-82eb45c899c7 2024-12-11T02:26:06,662 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T02:26:06,663 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-11T02:26:06,668 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T02:26:06,668 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T02:26:06,670 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-11T02:26:06,671 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T02:26:06,672 DEBUG [RS:0;5f57a24c5131:40311 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@114cfc65, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:06,673 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-11T02:26:06,673 DEBUG [RS:0;5f57a24c5131:40311 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c619217, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5f57a24c5131/172.17.0.2:0 2024-12-11T02:26:06,676 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-11T02:26:06,677 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-11T02:26:06,677 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-11T02:26:06,679 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(3073): reportForDuty to master=5f57a24c5131,40407,1733883963836 with isa=5f57a24c5131/172.17.0.2:40311, startcode=1733883964600 2024-12-11T02:26:06,679 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5f57a24c5131,40407,1733883963836 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T02:26:06,683 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5f57a24c5131:0, corePoolSize=5, maxPoolSize=5 2024-12-11T02:26:06,683 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5f57a24c5131:0, corePoolSize=5, maxPoolSize=5 2024-12-11T02:26:06,683 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5f57a24c5131:0, corePoolSize=5, maxPoolSize=5 2024-12-11T02:26:06,683 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5f57a24c5131:0, corePoolSize=5, maxPoolSize=5 2024-12-11T02:26:06,684 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5f57a24c5131:0, corePoolSize=10, maxPoolSize=10 2024-12-11T02:26:06,684 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,684 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5f57a24c5131:0, corePoolSize=2, maxPoolSize=2 2024-12-11T02:26:06,684 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,686 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733883996686 2024-12-11T02:26:06,688 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T02:26:06,689 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T02:26:06,690 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-11T02:26:06,691 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-11T02:26:06,692 DEBUG [RS:0;5f57a24c5131:40311 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T02:26:06,693 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T02:26:06,693 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T02:26:06,694 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T02:26:06,694 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T02:26:06,695 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,696 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T02:26:06,696 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:06,696 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T02:26:06,697 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T02:26:06,697 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T02:26:06,700 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T02:26:06,700 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T02:26:06,702 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5f57a24c5131:0:becomeActiveMaster-HFileCleaner.large.0-1733883966701,5,FailOnTimeoutGroup] 2024-12-11T02:26:06,703 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/5f57a24c5131:0:becomeActiveMaster-HFileCleaner.small.0-1733883966702,5,FailOnTimeoutGroup] 2024-12-11T02:26:06,703 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,703 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T02:26:06,705 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,705 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741831_1007 (size=1039) 2024-12-11T02:26:06,709 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-11T02:26:06,710 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 2024-12-11T02:26:06,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741832_1008 (size=32) 2024-12-11T02:26:06,732 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60869, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T02:26:06,738 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40407 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:06,740 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40407 {}] master.ServerManager(486): 
Registering regionserver=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:06,756 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 2024-12-11T02:26:06,757 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37113 2024-12-11T02:26:06,757 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-11T02:26:06,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T02:26:06,762 DEBUG [RS:0;5f57a24c5131:40311 {}] zookeeper.ZKUtil(111): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5f57a24c5131,40311,1733883964600 2024-12-11T02:26:06,762 WARN [RS:0;5f57a24c5131:40311 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T02:26:06,762 INFO [RS:0;5f57a24c5131:40311 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T02:26:06,762 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/WALs/5f57a24c5131,40311,1733883964600 2024-12-11T02:26:06,764 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5f57a24c5131,40311,1733883964600] 2024-12-11T02:26:06,777 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-11T02:26:06,789 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T02:26:06,802 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T02:26:06,805 INFO [RS:0;5f57a24c5131:40311 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T02:26:06,805 INFO [RS:0;5f57a24c5131:40311 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,806 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-11T02:26:06,813 INFO [RS:0;5f57a24c5131:40311 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
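The hbase:meta descriptor printed earlier in this log lists its column families with VERSIONS => '3', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY => 'true' and an 8 KB block size for 'info'. A hedged sketch of expressing those same attributes for an ordinary user table with the HBase 2.x TableDescriptorBuilder API is below; the table name "demo" is a hypothetical placeholder, and only attributes quoted in the log are set.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class TableDescriptorSketch {
  public static void main(String[] args) {
    // Mirror the 'info' family attributes shown for hbase:meta in the log.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .build();

    System.out.println(td);
  }
}
```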
2024-12-11T02:26:06,814 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,814 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,814 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,814 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,814 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,815 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5f57a24c5131:0, corePoolSize=2, maxPoolSize=2 2024-12-11T02:26:06,815 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,815 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,816 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,816 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,816 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5f57a24c5131:0, corePoolSize=1, maxPoolSize=1 2024-12-11T02:26:06,816 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5f57a24c5131:0, corePoolSize=3, maxPoolSize=3 2024-12-11T02:26:06,816 DEBUG [RS:0;5f57a24c5131:40311 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0, corePoolSize=3, maxPoolSize=3 2024-12-11T02:26:06,817 INFO [RS:0;5f57a24c5131:40311 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,818 INFO [RS:0;5f57a24c5131:40311 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,818 INFO [RS:0;5f57a24c5131:40311 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,818 INFO [RS:0;5f57a24c5131:40311 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,818 INFO [RS:0;5f57a24c5131:40311 {}] hbase.ChoreService(168): Chore ScheduledChore name=5f57a24c5131,40311,1733883964600-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-11T02:26:06,847 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-11T02:26:06,850 INFO [RS:0;5f57a24c5131:40311 {}] hbase.ChoreService(168): Chore ScheduledChore name=5f57a24c5131,40311,1733883964600-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:06,874 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.Replication(204): 5f57a24c5131,40311,1733883964600 started 2024-12-11T02:26:06,874 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1767): Serving as 5f57a24c5131,40311,1733883964600, RpcServer on 5f57a24c5131/172.17.0.2:40311, sessionid=0x1007ee55f5b0001 2024-12-11T02:26:06,875 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T02:26:06,875 DEBUG [RS:0;5f57a24c5131:40311 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:06,875 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5f57a24c5131,40311,1733883964600' 2024-12-11T02:26:06,875 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T02:26:06,876 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T02:26:06,877 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T02:26:06,877 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T02:26:06,877 DEBUG [RS:0;5f57a24c5131:40311 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:06,877 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5f57a24c5131,40311,1733883964600' 2024-12-11T02:26:06,877 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T02:26:06,878 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T02:26:06,878 DEBUG [RS:0;5f57a24c5131:40311 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T02:26:06,878 INFO [RS:0;5f57a24c5131:40311 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T02:26:06,878 INFO [RS:0;5f57a24c5131:40311 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-11T02:26:06,985 INFO [RS:0;5f57a24c5131:40311 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T02:26:06,988 INFO [RS:0;5f57a24c5131:40311 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5f57a24c5131%2C40311%2C1733883964600, suffix=, logDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/WALs/5f57a24c5131,40311,1733883964600, archiveDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/oldWALs, maxLogs=32 2024-12-11T02:26:07,005 DEBUG [RS:0;5f57a24c5131:40311 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/WALs/5f57a24c5131,40311,1733883964600/5f57a24c5131%2C40311%2C1733883964600.1733883966990, exclude list is [], retry=0 2024-12-11T02:26:07,010 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46759,DS-3236bb8d-c196-43c6-8235-42fd3971ebe6,DISK] 2024-12-11T02:26:07,014 INFO [RS:0;5f57a24c5131:40311 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/WALs/5f57a24c5131,40311,1733883964600/5f57a24c5131%2C40311%2C1733883964600.1733883966990 2024-12-11T02:26:07,015 DEBUG [RS:0;5f57a24c5131:40311 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45779:45779)] 2024-12-11T02:26:07,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:07,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T02:26:07,133 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T02:26:07,133 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:07,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T02:26:07,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T02:26:07,136 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T02:26:07,136 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:07,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T02:26:07,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T02:26:07,140 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T02:26:07,140 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:07,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T02:26:07,142 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740 2024-12-11T02:26:07,143 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740 2024-12-11T02:26:07,147 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:26:07,149 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-11T02:26:07,154 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T02:26:07,155 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64866731, jitterRate=-0.03341038525104523}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:26:07,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-11T02:26:07,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-11T02:26:07,158 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-11T02:26:07,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-11T02:26:07,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T02:26:07,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T02:26:07,160 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-11T02:26:07,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-11T02:26:07,163 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-11T02:26:07,164 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-11T02:26:07,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T02:26:07,178 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T02:26:07,180 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T02:26:07,332 DEBUG [5f57a24c5131:40407 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-11T02:26:07,337 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:07,342 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5f57a24c5131,40311,1733883964600, state=OPENING 2024-12-11T02:26:07,348 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T02:26:07,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:07,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:07,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T02:26:07,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T02:26:07,353 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:26:07,528 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:07,530 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T02:26:07,534 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34674, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T02:26:07,545 INFO [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-11T02:26:07,545 INFO [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T02:26:07,546 INFO [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T02:26:07,549 INFO [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5f57a24c5131%2C40311%2C1733883964600.meta, suffix=.meta, logDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/WALs/5f57a24c5131,40311,1733883964600, archiveDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/oldWALs, maxLogs=32 2024-12-11T02:26:07,566 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/WALs/5f57a24c5131,40311,1733883964600/5f57a24c5131%2C40311%2C1733883964600.meta.1733883967551.meta, exclude list is [], retry=0 2024-12-11T02:26:07,570 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46759,DS-3236bb8d-c196-43c6-8235-42fd3971ebe6,DISK] 2024-12-11T02:26:07,573 INFO [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/WALs/5f57a24c5131,40311,1733883964600/5f57a24c5131%2C40311%2C1733883964600.meta.1733883967551.meta 2024-12-11T02:26:07,574 DEBUG 
[RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45779:45779)] 2024-12-11T02:26:07,574 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:26:07,575 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T02:26:07,636 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T02:26:07,641 INFO [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-11T02:26:07,646 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T02:26:07,646 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:07,646 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-11T02:26:07,646 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-11T02:26:07,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T02:26:07,652 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T02:26:07,652 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:07,653 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T02:26:07,653 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T02:26:07,655 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T02:26:07,655 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:07,656 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T02:26:07,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T02:26:07,658 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T02:26:07,658 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:07,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T02:26:07,661 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740 2024-12-11T02:26:07,664 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740 2024-12-11T02:26:07,667 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:26:07,671 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-11T02:26:07,672 INFO [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69853792, jitterRate=0.04090261459350586}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:26:07,674 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-11T02:26:07,682 INFO [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733883967522 2024-12-11T02:26:07,693 DEBUG [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T02:26:07,694 INFO [RS_OPEN_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-11T02:26:07,695 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:07,697 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5f57a24c5131,40311,1733883964600, state=OPEN 2024-12-11T02:26:07,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T02:26:07,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T02:26:07,701 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T02:26:07,702 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T02:26:07,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T02:26:07,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=5f57a24c5131,40311,1733883964600 in 348 msec 2024-12-11T02:26:07,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T02:26:07,711 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 537 msec 2024-12-11T02:26:07,716 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.1000 sec 2024-12-11T02:26:07,717 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733883967716, completionTime=-1 2024-12-11T02:26:07,717 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-11T02:26:07,717 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-11T02:26:07,756 DEBUG [hconnection-0x2d5916a5-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:07,759 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34688, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:07,770 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-11T02:26:07,770 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733884027770 2024-12-11T02:26:07,770 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733884087770 2024-12-11T02:26:07,770 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 53 msec 2024-12-11T02:26:07,792 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f57a24c5131,40407,1733883963836-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:07,792 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f57a24c5131,40407,1733883963836-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:07,792 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f57a24c5131,40407,1733883963836-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:07,793 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5f57a24c5131:40407, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:07,794 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T02:26:07,799 DEBUG [master/5f57a24c5131:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-11T02:26:07,802 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-11T02:26:07,803 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T02:26:07,810 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-11T02:26:07,813 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T02:26:07,815 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:07,817 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T02:26:07,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741835_1011 (size=358) 2024-12-11T02:26:08,233 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5519ba8b50773a902ba9dca0bed2059c, NAME => 'hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 2024-12-11T02:26:08,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741836_1012 (size=42) 2024-12-11T02:26:08,645 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:08,645 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 5519ba8b50773a902ba9dca0bed2059c, disabling compactions & flushes 2024-12-11T02:26:08,645 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 2024-12-11T02:26:08,645 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 2024-12-11T02:26:08,645 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 
after waiting 0 ms 2024-12-11T02:26:08,645 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 2024-12-11T02:26:08,645 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 2024-12-11T02:26:08,645 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5519ba8b50773a902ba9dca0bed2059c: 2024-12-11T02:26:08,648 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T02:26:08,656 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733883968649"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733883968649"}]},"ts":"1733883968649"} 2024-12-11T02:26:08,683 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T02:26:08,686 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T02:26:08,689 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733883968686"}]},"ts":"1733883968686"} 2024-12-11T02:26:08,694 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-11T02:26:08,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=5519ba8b50773a902ba9dca0bed2059c, ASSIGN}] 2024-12-11T02:26:08,703 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=5519ba8b50773a902ba9dca0bed2059c, ASSIGN 2024-12-11T02:26:08,705 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=5519ba8b50773a902ba9dca0bed2059c, ASSIGN; state=OFFLINE, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=false 2024-12-11T02:26:08,855 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=5519ba8b50773a902ba9dca0bed2059c, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:08,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 5519ba8b50773a902ba9dca0bed2059c, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:26:09,014 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:09,021 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 2024-12-11T02:26:09,022 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 5519ba8b50773a902ba9dca0bed2059c, NAME => 'hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:26:09,022 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:26:09,022 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:09,022 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:26:09,022 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:26:09,025 INFO [StoreOpener-5519ba8b50773a902ba9dca0bed2059c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:26:09,028 INFO [StoreOpener-5519ba8b50773a902ba9dca0bed2059c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5519ba8b50773a902ba9dca0bed2059c columnFamilyName info 2024-12-11T02:26:09,028 DEBUG [StoreOpener-5519ba8b50773a902ba9dca0bed2059c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:09,029 INFO [StoreOpener-5519ba8b50773a902ba9dca0bed2059c-1 {}] regionserver.HStore(327): Store=5519ba8b50773a902ba9dca0bed2059c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:09,031 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/namespace/5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:26:09,031 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/namespace/5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:26:09,035 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:26:09,039 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/namespace/5519ba8b50773a902ba9dca0bed2059c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T02:26:09,040 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 5519ba8b50773a902ba9dca0bed2059c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67430190, jitterRate=0.004788130521774292}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T02:26:09,042 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 5519ba8b50773a902ba9dca0bed2059c: 2024-12-11T02:26:09,044 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c., pid=6, masterSystemTime=1733883969014 2024-12-11T02:26:09,048 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 2024-12-11T02:26:09,048 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 
2024-12-11T02:26:09,049 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=5519ba8b50773a902ba9dca0bed2059c, regionState=OPEN, openSeqNum=2, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:09,057 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T02:26:09,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 5519ba8b50773a902ba9dca0bed2059c, server=5f57a24c5131,40311,1733883964600 in 193 msec 2024-12-11T02:26:09,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T02:26:09,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=5519ba8b50773a902ba9dca0bed2059c, ASSIGN in 357 msec 2024-12-11T02:26:09,062 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T02:26:09,063 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733883969062"}]},"ts":"1733883969062"} 2024-12-11T02:26:09,065 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-11T02:26:09,069 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T02:26:09,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2660 sec 2024-12-11T02:26:09,114 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-11T02:26:09,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:09,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-11T02:26:09,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:26:09,149 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-11T02:26:09,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-11T02:26:09,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 25 msec 2024-12-11T02:26:09,184 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-11T02:26:09,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-11T02:26:09,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 18 msec 2024-12-11T02:26:09,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-11T02:26:09,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-11T02:26:09,214 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.536sec 2024-12-11T02:26:09,216 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T02:26:09,218 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T02:26:09,219 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T02:26:09,219 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T02:26:09,219 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T02:26:09,220 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f57a24c5131,40407,1733883963836-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T02:26:09,221 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f57a24c5131,40407,1733883963836-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T02:26:09,228 DEBUG [master/5f57a24c5131:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-11T02:26:09,229 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T02:26:09,229 INFO [master/5f57a24c5131:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f57a24c5131,40407,1733883963836-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
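The two CreateNamespaceProcedure records above (pid=7 for 'default', pid=8 for 'hbase') are the master bootstrapping its built-in namespaces right after initialization. A client-requested namespace goes through the same procedure; below is a minimal, illustrative sketch using the stock HBase Admin API. The namespace name "example_ns" and the class name are hypothetical and not part of this test run.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceExample {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a CreateNamespaceProcedure to the master, analogous to pid=7/pid=8 above.
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }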
2024-12-11T02:26:09,259 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-12-11T02:26:09,260 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-11T02:26:09,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:09,271 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T02:26:09,271 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T02:26:09,281 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:09,289 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:09,299 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=5f57a24c5131,40407,1733883963836 2024-12-11T02:26:09,316 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=125, ProcessCount=11, AvailableMemoryMB=5060 2024-12-11T02:26:09,327 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T02:26:09,350 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T02:26:09,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
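The TableDescriptorChecker warning just above fires because "hbase.hregion.memstore.flush.size" resolves to 131072 bytes (128 KB), far below the usual 128 MB default, so the checker flags it as likely to cause very frequent flushing; constant flushing is exactly what an ACID-guarantees test wants to provoke. A minimal sketch of how such a value might be set on the configuration handed to the mini-cluster follows (the class name is illustrative; where the test actually sets it is not visible in this excerpt):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallFlushSizeConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Force tiny memstore flushes (128 KB instead of the 128 MB default) so the
        // flush and compaction paths are exercised constantly during the test.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1));
      }
    }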
2024-12-11T02:26:09,367 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:26:09,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:09,372 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T02:26:09,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-11T02:26:09,372 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:09,375 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T02:26:09,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T02:26:09,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741837_1013 (size=963) 2024-12-11T02:26:09,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T02:26:09,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T02:26:09,796 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 2024-12-11T02:26:09,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741838_1014 (size=53) 2024-12-11T02:26:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T02:26:10,208 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:10,208 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 422539d3733f091ff661b5e7e0fc5956, disabling compactions & flushes 2024-12-11T02:26:10,209 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:10,209 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:10,209 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. after waiting 0 ms 2024-12-11T02:26:10,209 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:10,209 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
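The create request logged at 02:26:09,367 describes 'TestAcidGuarantees' with the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three column families A, B and C, each keeping a single version with 64 KB blocks. A rough equivalent built with the HBase 2.x descriptor API is sketched below; it mirrors the descriptor printed in the log rather than the test's actual helper code, and the class name is made up for illustration.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuaranteesTable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Table-level metadata attribute seen in the log: ADAPTIVE in-memory compaction.
                  .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)        // VERSIONS => '1'
                    .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }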
2024-12-11T02:26:10,209 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:10,211 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T02:26:10,212 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733883970211"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733883970211"}]},"ts":"1733883970211"} 2024-12-11T02:26:10,215 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T02:26:10,217 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T02:26:10,217 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733883970217"}]},"ts":"1733883970217"} 2024-12-11T02:26:10,219 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T02:26:10,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=422539d3733f091ff661b5e7e0fc5956, ASSIGN}] 2024-12-11T02:26:10,226 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=422539d3733f091ff661b5e7e0fc5956, ASSIGN 2024-12-11T02:26:10,228 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=422539d3733f091ff661b5e7e0fc5956, ASSIGN; state=OFFLINE, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=false 2024-12-11T02:26:10,378 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=422539d3733f091ff661b5e7e0fc5956, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:10,382 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:26:10,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T02:26:10,536 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:10,542 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:10,542 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:26:10,543 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:10,543 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:10,543 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:10,543 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:10,546 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:10,549 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:26:10,550 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 422539d3733f091ff661b5e7e0fc5956 columnFamilyName A 2024-12-11T02:26:10,550 DEBUG [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:10,552 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] regionserver.HStore(327): Store=422539d3733f091ff661b5e7e0fc5956/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:10,552 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:10,554 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:26:10,554 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 422539d3733f091ff661b5e7e0fc5956 columnFamilyName B 2024-12-11T02:26:10,555 DEBUG [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:10,556 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] regionserver.HStore(327): Store=422539d3733f091ff661b5e7e0fc5956/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:10,556 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:10,558 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:26:10,558 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 422539d3733f091ff661b5e7e0fc5956 columnFamilyName C 2024-12-11T02:26:10,559 DEBUG [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:10,559 INFO [StoreOpener-422539d3733f091ff661b5e7e0fc5956-1 {}] regionserver.HStore(327): Store=422539d3733f091ff661b5e7e0fc5956/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:10,560 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:10,562 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:10,562 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:10,565 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:26:10,567 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:10,571 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T02:26:10,572 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 422539d3733f091ff661b5e7e0fc5956; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72824347, jitterRate=0.08516733348369598}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:26:10,573 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:10,574 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., pid=11, masterSystemTime=1733883970535 2024-12-11T02:26:10,577 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:10,577 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
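The StoreOpener lines above show each of the A, B and C stores being backed by a CompactingMemStore (in-memory flush threshold 2.00 MB, compactor=ADAPTIVE), which this table gets from the table-level 'hbase.hregion.compacting.memstore.type' attribute. The same policy can also be requested per column family through the descriptor API; the following is a small sketch of that alternative, not something this test does.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamilyExample {
      public static void main(String[] args) {
        // Request ADAPTIVE in-memory compaction for a single family instead of
        // setting it for the whole table via the metadata attribute.
        ColumnFamilyDescriptor cf =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
        System.out.println(cf);
      }
    }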
2024-12-11T02:26:10,578 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=422539d3733f091ff661b5e7e0fc5956, regionState=OPEN, openSeqNum=2, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:10,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-11T02:26:10,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 in 199 msec 2024-12-11T02:26:10,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-11T02:26:10,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=422539d3733f091ff661b5e7e0fc5956, ASSIGN in 360 msec 2024-12-11T02:26:10,588 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T02:26:10,588 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733883970588"}]},"ts":"1733883970588"} 2024-12-11T02:26:10,591 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T02:26:10,595 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T02:26:10,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2280 sec 2024-12-11T02:26:11,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T02:26:11,503 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-11T02:26:11,508 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-12-11T02:26:11,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,515 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,517 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,521 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T02:26:11,523 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58750, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T02:26:11,531 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-12-11T02:26:11,535 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,537 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-12-11T02:26:11,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,544 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-12-11T02:26:11,549 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,550 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-12-11T02:26:11,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,555 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-12-11T02:26:11,558 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,560 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-12-11T02:26:11,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,566 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-12-11T02:26:11,570 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,572 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-12-11T02:26:11,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,577 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-12-11T02:26:11,580 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:11,586 DEBUG [hconnection-0x62bc4cf2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,586 DEBUG [hconnection-0x6c4136b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,586 DEBUG [hconnection-0x4c21a880-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,587 DEBUG [hconnection-0x1c8a5105-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,587 DEBUG [hconnection-0x2d164ddf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,589 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34716, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,590 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34718, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,591 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,595 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): 
Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:11,598 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,601 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34762, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-11T02:26:11,605 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:11,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T02:26:11,606 DEBUG [hconnection-0x4ee426b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,608 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:11,609 DEBUG [hconnection-0x3b66a271-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:11,612 DEBUG [hconnection-0x79bb3058-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,612 DEBUG [hconnection-0x2004d1c2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:11,616 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34766, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,626 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,632 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34776, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,638 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34782, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:11,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:26:11,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:11,671 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:11,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:11,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:11,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:11,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:11,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:11,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T02:26:11,779 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:11,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:11,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:11,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:11,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:11,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:11,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:11,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/e62ffbdcd51d4002ba1b0433c9785f16 is 50, key is test_row_0/A:col10/1733883971629/Put/seqid=0 2024-12-11T02:26:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741839_1015 (size=12001) 2024-12-11T02:26:11,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:11,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884031848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:11,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/e62ffbdcd51d4002ba1b0433c9785f16 2024-12-11T02:26:11,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:11,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884031844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:11,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:11,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884031868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:11,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:11,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884031868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:11,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:11,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884031870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:11,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T02:26:11,984 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:11,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:11,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:11,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:11,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:11,990 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:11,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:11,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884032007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884032008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884032008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884032009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884032009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/6f963a4fe50247d28916067fb847b06a is 50, key is test_row_0/B:col10/1733883971629/Put/seqid=0 2024-12-11T02:26:12,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741840_1016 (size=12001) 2024-12-11T02:26:12,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/6f963a4fe50247d28916067fb847b06a 2024-12-11T02:26:12,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/1b6449bef95c43c8b5bc38e705c75dc9 is 50, key is test_row_0/C:col10/1733883971629/Put/seqid=0 2024-12-11T02:26:12,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741841_1017 (size=12001) 2024-12-11T02:26:12,144 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:12,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/1b6449bef95c43c8b5bc38e705c75dc9 2024-12-11T02:26:12,165 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:12,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:12,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:12,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/e62ffbdcd51d4002ba1b0433c9785f16 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/e62ffbdcd51d4002ba1b0433c9785f16 2024-12-11T02:26:12,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/e62ffbdcd51d4002ba1b0433c9785f16, entries=150, sequenceid=12, filesize=11.7 K 2024-12-11T02:26:12,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/6f963a4fe50247d28916067fb847b06a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6f963a4fe50247d28916067fb847b06a 2024-12-11T02:26:12,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6f963a4fe50247d28916067fb847b06a, entries=150, sequenceid=12, filesize=11.7 K 2024-12-11T02:26:12,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T02:26:12,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/1b6449bef95c43c8b5bc38e705c75dc9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/1b6449bef95c43c8b5bc38e705c75dc9 2024-12-11T02:26:12,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884032216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884032220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/1b6449bef95c43c8b5bc38e705c75dc9, entries=150, sequenceid=12, filesize=11.7 K 2024-12-11T02:26:12,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 422539d3733f091ff661b5e7e0fc5956 in 572ms, sequenceid=12, compaction requested=false 2024-12-11T02:26:12,236 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-11T02:26:12,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:12,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-11T02:26:12,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:12,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:12,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:12,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:12,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:12,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:12,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:12,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/9466f3a5fc7c49bfb7ee51e8f461125c is 50, key is test_row_0/A:col10/1733883971837/Put/seqid=0 2024-12-11T02:26:12,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46759 is added to blk_1073741842_1018 (size=14341) 2024-12-11T02:26:12,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/9466f3a5fc7c49bfb7ee51e8f461125c 2024-12-11T02:26:12,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884032273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884032274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884032286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:12,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:12,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/982cdd6602e341d89ab28c566f5b4e6b is 50, key is test_row_0/B:col10/1733883971837/Put/seqid=0 2024-12-11T02:26:12,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:12,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:12,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741843_1019 (size=12001) 2024-12-11T02:26:12,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/982cdd6602e341d89ab28c566f5b4e6b 2024-12-11T02:26:12,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884032399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884032400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884032404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/38b059b159ec4211b85f20086534ced5 is 50, key is test_row_0/C:col10/1733883971837/Put/seqid=0 2024-12-11T02:26:12,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741844_1020 (size=12001) 2024-12-11T02:26:12,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/38b059b159ec4211b85f20086534ced5 2024-12-11T02:26:12,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/9466f3a5fc7c49bfb7ee51e8f461125c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9466f3a5fc7c49bfb7ee51e8f461125c 2024-12-11T02:26:12,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9466f3a5fc7c49bfb7ee51e8f461125c, entries=200, sequenceid=39, filesize=14.0 K 2024-12-11T02:26:12,470 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-11T02:26:12,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/982cdd6602e341d89ab28c566f5b4e6b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/982cdd6602e341d89ab28c566f5b4e6b 2024-12-11T02:26:12,476 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:12,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:12,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:12,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/982cdd6602e341d89ab28c566f5b4e6b, entries=150, sequenceid=39, filesize=11.7 K 2024-12-11T02:26:12,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/38b059b159ec4211b85f20086534ced5 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/38b059b159ec4211b85f20086534ced5 2024-12-11T02:26:12,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/38b059b159ec4211b85f20086534ced5, entries=150, sequenceid=39, filesize=11.7 K 2024-12-11T02:26:12,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 422539d3733f091ff661b5e7e0fc5956 in 255ms, sequenceid=39, compaction requested=false 2024-12-11T02:26:12,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:12,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:12,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:26:12,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:12,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:12,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:12,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:12,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:12,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:12,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/8773f3edbc5c4fc690f2a8ae25b9e0fc is 50, key is test_row_0/A:col10/1733883972267/Put/seqid=0 2024-12-11T02:26:12,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741845_1021 (size=16681) 2024-12-11T02:26:12,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/8773f3edbc5c4fc690f2a8ae25b9e0fc 2024-12-11T02:26:12,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/14088e5b0e6f45caaf7a2fc6eed4d3a3 is 50, key is test_row_0/B:col10/1733883972267/Put/seqid=0 2024-12-11T02:26:12,633 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:12,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:12,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:12,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741846_1022 (size=12001) 2024-12-11T02:26:12,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/14088e5b0e6f45caaf7a2fc6eed4d3a3 2024-12-11T02:26:12,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/f4fb3a15e30244bda564ea2022d1ad25 is 50, key is test_row_0/C:col10/1733883972267/Put/seqid=0 2024-12-11T02:26:12,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884032680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884032687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T02:26:12,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884032688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884032701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884032714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741847_1023 (size=12001) 2024-12-11T02:26:12,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/f4fb3a15e30244bda564ea2022d1ad25 2024-12-11T02:26:12,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/8773f3edbc5c4fc690f2a8ae25b9e0fc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8773f3edbc5c4fc690f2a8ae25b9e0fc 2024-12-11T02:26:12,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8773f3edbc5c4fc690f2a8ae25b9e0fc, entries=250, sequenceid=50, filesize=16.3 K 2024-12-11T02:26:12,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/14088e5b0e6f45caaf7a2fc6eed4d3a3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/14088e5b0e6f45caaf7a2fc6eed4d3a3 2024-12-11T02:26:12,788 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/14088e5b0e6f45caaf7a2fc6eed4d3a3, entries=150, sequenceid=50, filesize=11.7 K 2024-12-11T02:26:12,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:12,794 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:12,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/f4fb3a15e30244bda564ea2022d1ad25 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/f4fb3a15e30244bda564ea2022d1ad25 2024-12-11T02:26:12,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:12,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/f4fb3a15e30244bda564ea2022d1ad25, entries=150, sequenceid=50, filesize=11.7 K 2024-12-11T02:26:12,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 422539d3733f091ff661b5e7e0fc5956 in 266ms, sequenceid=50, compaction requested=true 2024-12-11T02:26:12,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:12,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:12,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:12,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:12,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:12,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:12,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:12,812 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:12,813 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:12,817 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:12,819 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:12,819 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:12,819 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6f963a4fe50247d28916067fb847b06a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/982cdd6602e341d89ab28c566f5b4e6b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/14088e5b0e6f45caaf7a2fc6eed4d3a3] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.2 K 2024-12-11T02:26:12,821 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f963a4fe50247d28916067fb847b06a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733883971603 2024-12-11T02:26:12,822 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 982cdd6602e341d89ab28c566f5b4e6b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733883971837 2024-12-11T02:26:12,823 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 14088e5b0e6f45caaf7a2fc6eed4d3a3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733883972267 2024-12-11T02:26:12,824 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:12,824 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:12,824 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:12,824 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/e62ffbdcd51d4002ba1b0433c9785f16, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9466f3a5fc7c49bfb7ee51e8f461125c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8773f3edbc5c4fc690f2a8ae25b9e0fc] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=42.0 K 2024-12-11T02:26:12,832 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting e62ffbdcd51d4002ba1b0433c9785f16, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733883971603 2024-12-11T02:26:12,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:12,840 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T02:26:12,840 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9466f3a5fc7c49bfb7ee51e8f461125c, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733883971837 2024-12-11T02:26:12,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:12,846 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8773f3edbc5c4fc690f2a8ae25b9e0fc, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733883972267 2024-12-11T02:26:12,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884032853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884032857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884032860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/c58fd00a6a75413a8d37af531d784862 is 50, key is test_row_0/A:col10/1733883972836/Put/seqid=0 2024-12-11T02:26:12,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884032864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884032869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,904 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#11 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:12,904 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#10 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:12,905 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/21c86aa6b1cf4365a4512b76632a56d8 is 50, key is test_row_0/A:col10/1733883972267/Put/seqid=0 2024-12-11T02:26:12,905 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ae26cfad364c4d1dabf92856b72d7c8b is 50, key is test_row_0/B:col10/1733883972267/Put/seqid=0 2024-12-11T02:26:12,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741848_1024 (size=12001) 2024-12-11T02:26:12,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/c58fd00a6a75413a8d37af531d784862 2024-12-11T02:26:12,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741849_1025 (size=12104) 2024-12-11T02:26:12,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/5bfb029ec38d465783025a5ea71dc752 is 50, key is test_row_0/B:col10/1733883972836/Put/seqid=0 2024-12-11T02:26:12,949 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:12,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:12,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:12,961 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ae26cfad364c4d1dabf92856b72d7c8b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ae26cfad364c4d1dabf92856b72d7c8b 2024-12-11T02:26:12,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741850_1026 (size=12104) 2024-12-11T02:26:12,977 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/21c86aa6b1cf4365a4512b76632a56d8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21c86aa6b1cf4365a4512b76632a56d8 2024-12-11T02:26:12,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884032974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884032983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884032983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:12,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884032986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:12,994 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 21c86aa6b1cf4365a4512b76632a56d8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:12,994 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into ae26cfad364c4d1dabf92856b72d7c8b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:12,994 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:12,994 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:12,994 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=13, startTime=1733883972810; duration=0sec 2024-12-11T02:26:12,994 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=13, startTime=1733883972812; duration=0sec 2024-12-11T02:26:12,994 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:12,995 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:12,995 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:12,995 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:12,995 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] 
regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:12,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741851_1027 (size=12001) 2024-12-11T02:26:12,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/5bfb029ec38d465783025a5ea71dc752 2024-12-11T02:26:12,998 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:12,998 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:12,998 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:12,998 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/1b6449bef95c43c8b5bc38e705c75dc9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/38b059b159ec4211b85f20086534ced5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/f4fb3a15e30244bda564ea2022d1ad25] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.2 K 2024-12-11T02:26:12,999 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b6449bef95c43c8b5bc38e705c75dc9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733883971603 2024-12-11T02:26:13,001 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38b059b159ec4211b85f20086534ced5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733883971837 2024-12-11T02:26:13,002 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4fb3a15e30244bda564ea2022d1ad25, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733883972267 2024-12-11T02:26:13,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884032992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/3d074bda511f4ca1b43dd17ac00753be is 50, key is test_row_0/C:col10/1733883972836/Put/seqid=0 2024-12-11T02:26:13,034 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#14 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:13,035 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/41fc4d25ee1442ddb7b3a9520d35ead7 is 50, key is test_row_0/C:col10/1733883972267/Put/seqid=0 2024-12-11T02:26:13,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741852_1028 (size=12104) 2024-12-11T02:26:13,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741853_1029 (size=12001) 2024-12-11T02:26:13,074 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/3d074bda511f4ca1b43dd17ac00753be 2024-12-11T02:26:13,081 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/41fc4d25ee1442ddb7b3a9520d35ead7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/41fc4d25ee1442ddb7b3a9520d35ead7 2024-12-11T02:26:13,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/c58fd00a6a75413a8d37af531d784862 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c58fd00a6a75413a8d37af531d784862 2024-12-11T02:26:13,098 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 41fc4d25ee1442ddb7b3a9520d35ead7(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:13,098 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:13,098 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883972812; duration=0sec 2024-12-11T02:26:13,098 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:13,098 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:13,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c58fd00a6a75413a8d37af531d784862, entries=150, sequenceid=77, filesize=11.7 K 2024-12-11T02:26:13,105 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:13,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:13,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:13,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/5bfb029ec38d465783025a5ea71dc752 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/5bfb029ec38d465783025a5ea71dc752 2024-12-11T02:26:13,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/5bfb029ec38d465783025a5ea71dc752, entries=150, sequenceid=77, filesize=11.7 K 2024-12-11T02:26:13,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/3d074bda511f4ca1b43dd17ac00753be as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3d074bda511f4ca1b43dd17ac00753be 2024-12-11T02:26:13,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3d074bda511f4ca1b43dd17ac00753be, entries=150, sequenceid=77, filesize=11.7 K 2024-12-11T02:26:13,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 422539d3733f091ff661b5e7e0fc5956 in 297ms, sequenceid=77, compaction requested=false 2024-12-11T02:26:13,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:13,161 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T02:26:13,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:13,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:26:13,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:13,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:13,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:13,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:13,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:13,205 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:13,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d7e14a3580dd4ff69e36972b9aa46a48 is 50, key is test_row_0/A:col10/1733883972865/Put/seqid=0 2024-12-11T02:26:13,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:13,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:13,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741854_1030 (size=12001) 2024-12-11T02:26:13,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d7e14a3580dd4ff69e36972b9aa46a48 2024-12-11T02:26:13,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T02:26:13,282 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-11T02:26:13,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/da6b928ff11c449093c6b22eef965a1e is 50, key is test_row_0/B:col10/1733883972865/Put/seqid=0 2024-12-11T02:26:13,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884033290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884033291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884033292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884033295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884033297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741855_1031 (size=12001) 2024-12-11T02:26:13,324 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/da6b928ff11c449093c6b22eef965a1e 2024-12-11T02:26:13,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/8145e623584e446aadffe66803004499 is 50, key is test_row_0/C:col10/1733883972865/Put/seqid=0 2024-12-11T02:26:13,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741856_1032 (size=12001) 2024-12-11T02:26:13,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884033401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884033403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884033402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884033405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884033406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,418 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:13,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:13,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,573 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:13,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:13,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884033608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884033607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884033609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884033609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:13,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884033611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T02:26:13,730 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:13,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:13,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:13,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:13,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:13,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/8145e623584e446aadffe66803004499 2024-12-11T02:26:13,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d7e14a3580dd4ff69e36972b9aa46a48 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d7e14a3580dd4ff69e36972b9aa46a48 2024-12-11T02:26:13,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d7e14a3580dd4ff69e36972b9aa46a48, entries=150, sequenceid=92, filesize=11.7 K 2024-12-11T02:26:13,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/da6b928ff11c449093c6b22eef965a1e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/da6b928ff11c449093c6b22eef965a1e 2024-12-11T02:26:13,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/da6b928ff11c449093c6b22eef965a1e, entries=150, sequenceid=92, filesize=11.7 K 2024-12-11T02:26:13,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/8145e623584e446aadffe66803004499 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8145e623584e446aadffe66803004499 2024-12-11T02:26:13,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8145e623584e446aadffe66803004499, entries=150, sequenceid=92, filesize=11.7 K 2024-12-11T02:26:13,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 422539d3733f091ff661b5e7e0fc5956 in 644ms, sequenceid=92, compaction requested=true 2024-12-11T02:26:13,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:13,847 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:13,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:13,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:13,850 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:13,850 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:13,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:13,850 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:13,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:13,851 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:13,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:13,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:13,851 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21c86aa6b1cf4365a4512b76632a56d8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c58fd00a6a75413a8d37af531d784862, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d7e14a3580dd4ff69e36972b9aa46a48] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.3 K 2024-12-11T02:26:13,852 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 21c86aa6b1cf4365a4512b76632a56d8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733883972267 2024-12-11T02:26:13,853 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting c58fd00a6a75413a8d37af531d784862, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733883972683 2024-12-11T02:26:13,854 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:13,854 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:13,854 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:13,854 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ae26cfad364c4d1dabf92856b72d7c8b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/5bfb029ec38d465783025a5ea71dc752, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/da6b928ff11c449093c6b22eef965a1e] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.3 K 2024-12-11T02:26:13,855 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d7e14a3580dd4ff69e36972b9aa46a48, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733883972865 2024-12-11T02:26:13,855 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae26cfad364c4d1dabf92856b72d7c8b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733883972267 2024-12-11T02:26:13,856 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5bfb029ec38d465783025a5ea71dc752, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733883972683 2024-12-11T02:26:13,859 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting da6b928ff11c449093c6b22eef965a1e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733883972865 2024-12-11T02:26:13,883 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:13,884 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:13,884 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/7f5cbcd6b40a4b5fa47fa05abb9acebf is 50, key is test_row_0/B:col10/1733883972865/Put/seqid=0 2024-12-11T02:26:13,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T02:26:13,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:13,886 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:26:13,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:13,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:13,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:13,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:13,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:13,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:13,893 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#19 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:13,899 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/5288bbc23a284a48b19b3135db2aa695 is 50, key is test_row_0/A:col10/1733883972865/Put/seqid=0 2024-12-11T02:26:13,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741857_1033 (size=12207) 2024-12-11T02:26:13,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/06c23403c85f4c06a6cbd25581a86ada is 50, key is test_row_0/A:col10/1733883973290/Put/seqid=0 2024-12-11T02:26:13,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741858_1034 (size=12207) 2024-12-11T02:26:13,935 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/7f5cbcd6b40a4b5fa47fa05abb9acebf as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/7f5cbcd6b40a4b5fa47fa05abb9acebf 2024-12-11T02:26:13,941 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/5288bbc23a284a48b19b3135db2aa695 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/5288bbc23a284a48b19b3135db2aa695 2024-12-11T02:26:13,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741859_1035 (size=12001) 2024-12-11T02:26:13,986 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into 7f5cbcd6b40a4b5fa47fa05abb9acebf(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:13,987 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:13,987 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=13, startTime=1733883973849; duration=0sec 2024-12-11T02:26:13,987 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:13,987 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:13,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:13,987 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:13,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:13,988 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 5288bbc23a284a48b19b3135db2aa695(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:13,988 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:13,988 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=13, startTime=1733883973847; duration=0sec 2024-12-11T02:26:13,988 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:13,989 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:13,995 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:13,995 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:13,995 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:13,996 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/41fc4d25ee1442ddb7b3a9520d35ead7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3d074bda511f4ca1b43dd17ac00753be, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8145e623584e446aadffe66803004499] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.3 K 2024-12-11T02:26:13,997 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41fc4d25ee1442ddb7b3a9520d35ead7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733883972267 2024-12-11T02:26:13,998 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d074bda511f4ca1b43dd17ac00753be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733883972683 2024-12-11T02:26:13,998 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8145e623584e446aadffe66803004499, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733883972865 2024-12-11T02:26:14,015 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:14,016 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/d27ab1af78384da4a025c0fc883ecc27 is 50, key is test_row_0/C:col10/1733883972865/Put/seqid=0 2024-12-11T02:26:14,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741860_1036 (size=12207) 2024-12-11T02:26:14,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884034002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884034003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884034055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884034058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884034058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884034157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884034157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884034162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884034163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884034177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,350 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/06c23403c85f4c06a6cbd25581a86ada 2024-12-11T02:26:14,352 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T02:26:14,352 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-11T02:26:14,355 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-11T02:26:14,355 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-11T02:26:14,357 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T02:26:14,357 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-11T02:26:14,357 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering 
adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-11T02:26:14,357 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-11T02:26:14,359 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-11T02:26:14,359 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-11T02:26:14,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884034362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884034364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884034368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884034370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884034384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/1f022733ac1c4089a982137093ac0d93 is 50, key is test_row_0/B:col10/1733883973290/Put/seqid=0 2024-12-11T02:26:14,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741861_1037 (size=12001) 2024-12-11T02:26:14,425 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/1f022733ac1c4089a982137093ac0d93 2024-12-11T02:26:14,448 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/d27ab1af78384da4a025c0fc883ecc27 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d27ab1af78384da4a025c0fc883ecc27 2024-12-11T02:26:14,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/95d2c00ebae44b55b9f08ba01dcebb27 is 50, key is test_row_0/C:col10/1733883973290/Put/seqid=0 2024-12-11T02:26:14,462 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into d27ab1af78384da4a025c0fc883ecc27(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:14,462 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:14,462 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883973851; duration=0sec 2024-12-11T02:26:14,462 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:14,463 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:14,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741862_1038 (size=12001) 2024-12-11T02:26:14,481 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/95d2c00ebae44b55b9f08ba01dcebb27 2024-12-11T02:26:14,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/06c23403c85f4c06a6cbd25581a86ada as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/06c23403c85f4c06a6cbd25581a86ada 2024-12-11T02:26:14,511 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/06c23403c85f4c06a6cbd25581a86ada, entries=150, sequenceid=116, filesize=11.7 K 2024-12-11T02:26:14,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/1f022733ac1c4089a982137093ac0d93 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1f022733ac1c4089a982137093ac0d93 2024-12-11T02:26:14,532 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1f022733ac1c4089a982137093ac0d93, entries=150, sequenceid=116, filesize=11.7 K 2024-12-11T02:26:14,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/95d2c00ebae44b55b9f08ba01dcebb27 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/95d2c00ebae44b55b9f08ba01dcebb27 2024-12-11T02:26:14,548 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/95d2c00ebae44b55b9f08ba01dcebb27, entries=150, sequenceid=116, filesize=11.7 K 2024-12-11T02:26:14,551 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 422539d3733f091ff661b5e7e0fc5956 in 665ms, sequenceid=116, compaction requested=false 2024-12-11T02:26:14,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:14,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:14,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-11T02:26:14,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-11T02:26:14,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-11T02:26:14,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9440 sec 2024-12-11T02:26:14,566 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.9660 sec 2024-12-11T02:26:14,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T02:26:14,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:14,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:14,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:14,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:14,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:14,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-11T02:26:14,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:14,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/f70d47315944415d948824d791f4c21f is 50, key is test_row_0/A:col10/1733883974008/Put/seqid=0 2024-12-11T02:26:14,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884034713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884034719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884034719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884034721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884034722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741863_1039 (size=12101) 2024-12-11T02:26:14,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/f70d47315944415d948824d791f4c21f 2024-12-11T02:26:14,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/1ffa7641ec224dad9796d220a63cacb6 is 50, key is test_row_0/B:col10/1733883974008/Put/seqid=0 2024-12-11T02:26:14,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741864_1040 (size=12101) 2024-12-11T02:26:14,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/1ffa7641ec224dad9796d220a63cacb6 2024-12-11T02:26:14,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/ce30034d54034cf981e191616bfe81ce is 50, key is test_row_0/C:col10/1733883974008/Put/seqid=0 2024-12-11T02:26:14,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884034826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884034826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884034827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:14,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884034827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884034828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:14,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741865_1041 (size=12101) 2024-12-11T02:26:14,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/ce30034d54034cf981e191616bfe81ce 2024-12-11T02:26:14,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/f70d47315944415d948824d791f4c21f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f70d47315944415d948824d791f4c21f 2024-12-11T02:26:14,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f70d47315944415d948824d791f4c21f, entries=150, sequenceid=134, filesize=11.8 K 2024-12-11T02:26:14,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/1ffa7641ec224dad9796d220a63cacb6 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1ffa7641ec224dad9796d220a63cacb6 2024-12-11T02:26:14,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1ffa7641ec224dad9796d220a63cacb6, entries=150, sequenceid=134, filesize=11.8 K 2024-12-11T02:26:14,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/ce30034d54034cf981e191616bfe81ce as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/ce30034d54034cf981e191616bfe81ce 2024-12-11T02:26:14,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/ce30034d54034cf981e191616bfe81ce, entries=150, sequenceid=134, filesize=11.8 K 2024-12-11T02:26:14,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 422539d3733f091ff661b5e7e0fc5956 in 241ms, sequenceid=134, compaction requested=true 2024-12-11T02:26:14,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:14,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:14,917 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:14,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:14,917 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:14,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:14,920 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:14,920 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:14,920 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:14,920 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] 
regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:14,920 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:14,920 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:14,920 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/5288bbc23a284a48b19b3135db2aa695, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/06c23403c85f4c06a6cbd25581a86ada, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f70d47315944415d948824d791f4c21f] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.5 K 2024-12-11T02:26:14,921 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/7f5cbcd6b40a4b5fa47fa05abb9acebf, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1f022733ac1c4089a982137093ac0d93, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1ffa7641ec224dad9796d220a63cacb6] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.5 K 2024-12-11T02:26:14,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:14,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:14,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:14,922 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5288bbc23a284a48b19b3135db2aa695, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733883972865 2024-12-11T02:26:14,922 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f5cbcd6b40a4b5fa47fa05abb9acebf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733883972865 2024-12-11T02:26:14,923 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 06c23403c85f4c06a6cbd25581a86ada, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733883973287 2024-12-11T02:26:14,923 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f022733ac1c4089a982137093ac0d93, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733883973287 2024-12-11T02:26:14,924 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting f70d47315944415d948824d791f4c21f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733883973920 2024-12-11T02:26:14,926 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ffa7641ec224dad9796d220a63cacb6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733883973920 2024-12-11T02:26:14,960 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#28 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:14,962 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#27 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:14,963 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/cb17666e57fb46d4a69db893da02d75e is 50, key is test_row_0/A:col10/1733883974008/Put/seqid=0 2024-12-11T02:26:14,964 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/f78a35c0d05e4a0c84cfb6b657c00347 is 50, key is test_row_0/B:col10/1733883974008/Put/seqid=0 2024-12-11T02:26:14,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741867_1043 (size=12409) 2024-12-11T02:26:14,986 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/cb17666e57fb46d4a69db893da02d75e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/cb17666e57fb46d4a69db893da02d75e 2024-12-11T02:26:15,001 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into cb17666e57fb46d4a69db893da02d75e(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:15,001 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:15,001 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=13, startTime=1733883974917; duration=0sec 2024-12-11T02:26:15,002 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:15,002 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:15,002 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:15,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741866_1042 (size=12409) 2024-12-11T02:26:15,005 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:15,005 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:15,006 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:15,006 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d27ab1af78384da4a025c0fc883ecc27, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/95d2c00ebae44b55b9f08ba01dcebb27, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/ce30034d54034cf981e191616bfe81ce] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.5 K 2024-12-11T02:26:15,007 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d27ab1af78384da4a025c0fc883ecc27, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733883972865 2024-12-11T02:26:15,008 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95d2c00ebae44b55b9f08ba01dcebb27, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733883973287 2024-12-11T02:26:15,010 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce30034d54034cf981e191616bfe81ce, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733883973920 2024-12-11T02:26:15,021 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/f78a35c0d05e4a0c84cfb6b657c00347 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f78a35c0d05e4a0c84cfb6b657c00347 2024-12-11T02:26:15,036 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into f78a35c0d05e4a0c84cfb6b657c00347(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:15,036 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:15,036 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=13, startTime=1733883974917; duration=0sec 2024-12-11T02:26:15,037 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:15,037 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:15,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:26:15,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:15,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:15,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:15,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:15,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:15,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:15,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:15,047 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#29 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:15,048 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/8d133dadc4df4353bff395ebb1ce4901 is 50, key is test_row_0/C:col10/1733883974008/Put/seqid=0 2024-12-11T02:26:15,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/b5c021239c9249689a2c82cefd93a1df is 50, key is test_row_0/A:col10/1733883975036/Put/seqid=0 2024-12-11T02:26:15,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884035056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884035059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884035059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884035061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884035062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741868_1044 (size=14541) 2024-12-11T02:26:15,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/b5c021239c9249689a2c82cefd93a1df 2024-12-11T02:26:15,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/2ebe61bd92b44f4f89e30b8b9d8b3f29 is 50, key is test_row_0/B:col10/1733883975036/Put/seqid=0 2024-12-11T02:26:15,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741869_1045 (size=12409) 2024-12-11T02:26:15,142 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/8d133dadc4df4353bff395ebb1ce4901 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8d133dadc4df4353bff395ebb1ce4901 2024-12-11T02:26:15,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741870_1046 (size=12151) 2024-12-11T02:26:15,144 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/2ebe61bd92b44f4f89e30b8b9d8b3f29 2024-12-11T02:26:15,160 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 8d133dadc4df4353bff395ebb1ce4901(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:15,160 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:15,160 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883974921; duration=0sec 2024-12-11T02:26:15,160 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:15,160 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:15,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/a314f77b630d4887bd9148b707d2adf1 is 50, key is test_row_0/C:col10/1733883975036/Put/seqid=0 2024-12-11T02:26:15,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884035165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884035167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884035168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884035170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884035173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741871_1047 (size=12151) 2024-12-11T02:26:15,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/a314f77b630d4887bd9148b707d2adf1 2024-12-11T02:26:15,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/b5c021239c9249689a2c82cefd93a1df as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b5c021239c9249689a2c82cefd93a1df 2024-12-11T02:26:15,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b5c021239c9249689a2c82cefd93a1df, entries=200, sequenceid=159, filesize=14.2 K 2024-12-11T02:26:15,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/2ebe61bd92b44f4f89e30b8b9d8b3f29 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/2ebe61bd92b44f4f89e30b8b9d8b3f29 2024-12-11T02:26:15,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/2ebe61bd92b44f4f89e30b8b9d8b3f29, entries=150, sequenceid=159, filesize=11.9 K 2024-12-11T02:26:15,247 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/a314f77b630d4887bd9148b707d2adf1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a314f77b630d4887bd9148b707d2adf1 2024-12-11T02:26:15,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a314f77b630d4887bd9148b707d2adf1, entries=150, sequenceid=159, filesize=11.9 K 2024-12-11T02:26:15,262 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 422539d3733f091ff661b5e7e0fc5956 in 223ms, sequenceid=159, compaction requested=false 2024-12-11T02:26:15,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:15,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:26:15,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:15,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:15,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:15,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/59370aa358f94bffaf2d417a034f2ebc is 50, key is test_row_0/A:col10/1733883975377/Put/seqid=0 2024-12-11T02:26:15,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884035416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884035416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884035417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884035418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884035419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741872_1048 (size=14541) 2024-12-11T02:26:15,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/59370aa358f94bffaf2d417a034f2ebc 2024-12-11T02:26:15,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ba9bd185b2fb4081a0ebfc23d479116b is 50, key is test_row_0/B:col10/1733883975377/Put/seqid=0 2024-12-11T02:26:15,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741873_1049 (size=12151) 2024-12-11T02:26:15,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ba9bd185b2fb4081a0ebfc23d479116b 2024-12-11T02:26:15,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/b50965091c94458395f7deaeac10e917 is 50, key is test_row_0/C:col10/1733883975377/Put/seqid=0 2024-12-11T02:26:15,526 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884035525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884035528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884035529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884035530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884035536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741874_1050 (size=12151) 2024-12-11T02:26:15,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/b50965091c94458395f7deaeac10e917 2024-12-11T02:26:15,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/59370aa358f94bffaf2d417a034f2ebc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/59370aa358f94bffaf2d417a034f2ebc 2024-12-11T02:26:15,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/59370aa358f94bffaf2d417a034f2ebc, entries=200, sequenceid=175, filesize=14.2 K 2024-12-11T02:26:15,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ba9bd185b2fb4081a0ebfc23d479116b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ba9bd185b2fb4081a0ebfc23d479116b 2024-12-11T02:26:15,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ba9bd185b2fb4081a0ebfc23d479116b, entries=150, sequenceid=175, filesize=11.9 K 2024-12-11T02:26:15,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/b50965091c94458395f7deaeac10e917 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b50965091c94458395f7deaeac10e917 2024-12-11T02:26:15,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b50965091c94458395f7deaeac10e917, entries=150, sequenceid=175, filesize=11.9 K 2024-12-11T02:26:15,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 422539d3733f091ff661b5e7e0fc5956 in 228ms, sequenceid=175, compaction requested=true 2024-12-11T02:26:15,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:15,608 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:15,609 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:15,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:15,610 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41491 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:15,610 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:15,611 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
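[Editor's note] The records above show Mutate RPCs being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") at HRegion.checkResources while MemStoreFlusher.0 drains the region; the server expects the caller to back off and retry. The following is a minimal, hypothetical client-side sketch of such a retry loop, using the standard HBase Java client API. The table name, column family, and backoff values are illustrative only, and it assumes the client's own retry policy is configured so that the server exception surfaces directly (by default the HBase client retries internally before throwing).

    // Hypothetical writer sketch: retry a Put when the region rejects it with
    // RegionTooBusyException, as in the "Over memstore limit=512.0 K" records above.
    // Table name, column family, and retry/backoff values are illustrative only.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 50;                      // initial backoff, illustrative
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);                   // may be rejected while the region flushes
                        return;                           // write accepted
                    } catch (RegionTooBusyException busy) {
                        Thread.sleep(backoffMs);          // give MemStoreFlusher time to catch up
                        backoffMs = Math.min(backoffMs * 2, 5000);
                    }
                }
                throw new RuntimeException("region stayed too busy after retries");
            }
        }
    }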
2024-12-11T02:26:15,611 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/cb17666e57fb46d4a69db893da02d75e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b5c021239c9249689a2c82cefd93a1df, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/59370aa358f94bffaf2d417a034f2ebc] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=40.5 K 2024-12-11T02:26:15,611 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:15,611 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:15,611 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:15,612 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f78a35c0d05e4a0c84cfb6b657c00347, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/2ebe61bd92b44f4f89e30b8b9d8b3f29, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ba9bd185b2fb4081a0ebfc23d479116b] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.9 K 2024-12-11T02:26:15,612 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb17666e57fb46d4a69db893da02d75e, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733883973920 2024-12-11T02:26:15,612 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f78a35c0d05e4a0c84cfb6b657c00347, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733883973920 2024-12-11T02:26:15,613 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5c021239c9249689a2c82cefd93a1df, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733883974708 2024-12-11T02:26:15,613 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ebe61bd92b44f4f89e30b8b9d8b3f29, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733883974718 2024-12-11T02:26:15,613 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 59370aa358f94bffaf2d417a034f2ebc, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733883975057 2024-12-11T02:26:15,614 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ba9bd185b2fb4081a0ebfc23d479116b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733883975060 2024-12-11T02:26:15,638 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#36 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:15,639 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#37 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:15,639 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/0df88f1bca1c47fcb5cb2677f4ebe899 is 50, key is test_row_0/B:col10/1733883975377/Put/seqid=0 2024-12-11T02:26:15,640 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/9b817c1730af4c1a820eec9a5d0cdf7b is 50, key is test_row_0/A:col10/1733883975377/Put/seqid=0 2024-12-11T02:26:15,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741875_1051 (size=12561) 2024-12-11T02:26:15,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741876_1052 (size=12561) 2024-12-11T02:26:15,711 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/9b817c1730af4c1a820eec9a5d0cdf7b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9b817c1730af4c1a820eec9a5d0cdf7b 2024-12-11T02:26:15,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T02:26:15,719 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-11T02:26:15,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:15,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-11T02:26:15,724 INFO [PEWorker-1 {}] 
procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:15,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T02:26:15,726 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:15,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:15,727 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 9b817c1730af4c1a820eec9a5d0cdf7b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:15,728 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:15,728 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=13, startTime=1733883975608; duration=0sec 2024-12-11T02:26:15,728 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:15,728 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:15,728 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:15,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:15,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:15,731 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
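[Editor's note] The records just above show the master side of an admin-requested flush: HMaster stores a FlushTableProcedure (pid=14 here), fans it out into FlushRegionProcedure subprocedures, and the client polls "Checking to see if procedure is done". A minimal sketch of the client call that produces this sequence is below; it uses the standard Admin API, with connection handling simplified and the table name taken from this log only as an example.

    // Minimal sketch of the client side of the flush records above: an admin-driven
    // table flush, which the master turns into a FlushTableProcedure.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // The client submits the flush and then waits for the master-side
                // procedure to finish, which is what the repeated
                // "Checking to see if procedure is done pid=14" records reflect.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }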
2024-12-11T02:26:15,731 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8d133dadc4df4353bff395ebb1ce4901, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a314f77b630d4887bd9148b707d2adf1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b50965091c94458395f7deaeac10e917] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=35.9 K 2024-12-11T02:26:15,736 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d133dadc4df4353bff395ebb1ce4901, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733883973920 2024-12-11T02:26:15,737 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting a314f77b630d4887bd9148b707d2adf1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733883974718 2024-12-11T02:26:15,737 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting b50965091c94458395f7deaeac10e917, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733883975060 2024-12-11T02:26:15,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:15,738 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:26:15,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:15,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:15,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:15,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:15,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:15,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:15,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/b86a9c26315d4f01bc7d48cfdea7bc6e is 50, key is test_row_0/A:col10/1733883975736/Put/seqid=0 2024-12-11T02:26:15,752 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#39 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:15,753 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/da6a5615cae04b9f9d645ebdbc5fdc97 is 50, key is test_row_0/C:col10/1733883975377/Put/seqid=0 2024-12-11T02:26:15,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741877_1053 (size=12151) 2024-12-11T02:26:15,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/b86a9c26315d4f01bc7d48cfdea7bc6e 2024-12-11T02:26:15,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741878_1054 (size=12561) 2024-12-11T02:26:15,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884035758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884035760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884035759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884035773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/68aaaa7b118f46829131c9f7c4ab9001 is 50, key is test_row_0/B:col10/1733883975736/Put/seqid=0 2024-12-11T02:26:15,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884035783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741879_1055 (size=12151) 2024-12-11T02:26:15,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T02:26:15,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884035875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884035875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884035875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,879 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New 
admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-11T02:26:15,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:15,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:15,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:15,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:15,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
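[Editor's note] Every rejected Mutate above reports the same blocking threshold, "Over memstore limit=512.0 K". In HBase this per-region write-blocking limit is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below shows that arithmetic; the 128 KB flush size is an assumed test setting (the test's actual configuration is not part of this excerpt), combined with the default multiplier of 4 to reproduce the 512 K figure.

    // Sketch of how the 512.0 K blocking threshold in the RegionTooBusyException
    // records is derived: per-region flush size times the block multiplier.
    // The 128 KB flush size below is an assumed test value, not taken from this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);    // assumed test value
            long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = flushSize * multiplier;                      // 128 KB * 4 = 512 KB
            System.out.println("writes block above " + (blockingLimit / 1024.0) + " K per region");
        }
    }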
2024-12-11T02:26:15,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:15,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884035886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:15,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:15,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884035888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T02:26:16,036 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-11T02:26:16,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:16,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:16,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:16,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:16,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:16,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:16,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884036079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884036080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884036080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884036093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884036097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,103 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/0df88f1bca1c47fcb5cb2677f4ebe899 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/0df88f1bca1c47fcb5cb2677f4ebe899 2024-12-11T02:26:16,113 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into 0df88f1bca1c47fcb5cb2677f4ebe899(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:16,113 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:16,114 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=13, startTime=1733883975609; duration=0sec 2024-12-11T02:26:16,114 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:16,114 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:16,187 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/da6a5615cae04b9f9d645ebdbc5fdc97 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/da6a5615cae04b9f9d645ebdbc5fdc97 2024-12-11T02:26:16,191 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-11T02:26:16,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:16,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:16,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:16,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:16,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:16,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:16,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/68aaaa7b118f46829131c9f7c4ab9001 2024-12-11T02:26:16,200 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into da6a5615cae04b9f9d645ebdbc5fdc97(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:16,200 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:16,200 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883975609; duration=0sec 2024-12-11T02:26:16,200 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:16,200 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:16,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/3ce40ce569324bd8a72fd9ae1421d007 is 50, key is test_row_0/C:col10/1733883975736/Put/seqid=0 2024-12-11T02:26:16,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741880_1056 (size=12151) 2024-12-11T02:26:16,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/3ce40ce569324bd8a72fd9ae1421d007 2024-12-11T02:26:16,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/b86a9c26315d4f01bc7d48cfdea7bc6e as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b86a9c26315d4f01bc7d48cfdea7bc6e 2024-12-11T02:26:16,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b86a9c26315d4f01bc7d48cfdea7bc6e, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T02:26:16,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/68aaaa7b118f46829131c9f7c4ab9001 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/68aaaa7b118f46829131c9f7c4ab9001 2024-12-11T02:26:16,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/68aaaa7b118f46829131c9f7c4ab9001, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T02:26:16,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/3ce40ce569324bd8a72fd9ae1421d007 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3ce40ce569324bd8a72fd9ae1421d007 2024-12-11T02:26:16,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3ce40ce569324bd8a72fd9ae1421d007, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T02:26:16,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 422539d3733f091ff661b5e7e0fc5956 in 559ms, sequenceid=200, compaction requested=false 2024-12-11T02:26:16,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:16,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T02:26:16,346 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-11T02:26:16,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:16,347 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T02:26:16,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:16,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:16,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:16,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:16,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:16,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:16,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/215dc3d983cf4192987e56d812bb1e08 is 50, key is test_row_0/A:col10/1733883975759/Put/seqid=0 2024-12-11T02:26:16,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741881_1057 (size=12151) 2024-12-11T02:26:16,377 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/215dc3d983cf4192987e56d812bb1e08 2024-12-11T02:26:16,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:16,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
as already flushing 2024-12-11T02:26:16,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b314120db62b428abce5f47870cdc0ea is 50, key is test_row_0/B:col10/1733883975759/Put/seqid=0 2024-12-11T02:26:16,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741882_1058 (size=12151) 2024-12-11T02:26:16,409 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b314120db62b428abce5f47870cdc0ea 2024-12-11T02:26:16,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/9853472c21ce468daa05b5e3c207e9ec is 50, key is test_row_0/C:col10/1733883975759/Put/seqid=0 2024-12-11T02:26:16,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884036430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884036433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884036429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884036433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884036434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741883_1059 (size=12151) 2024-12-11T02:26:16,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884036539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884036540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884036541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884036542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884036542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884036745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884036748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884036748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884036748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:16,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884036750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:16,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T02:26:16,854 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/9853472c21ce468daa05b5e3c207e9ec 2024-12-11T02:26:16,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/215dc3d983cf4192987e56d812bb1e08 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/215dc3d983cf4192987e56d812bb1e08 2024-12-11T02:26:16,879 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/215dc3d983cf4192987e56d812bb1e08, entries=150, sequenceid=215, filesize=11.9 K 2024-12-11T02:26:16,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b314120db62b428abce5f47870cdc0ea as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b314120db62b428abce5f47870cdc0ea 2024-12-11T02:26:16,890 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b314120db62b428abce5f47870cdc0ea, entries=150, sequenceid=215, filesize=11.9 K 2024-12-11T02:26:16,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/9853472c21ce468daa05b5e3c207e9ec as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/9853472c21ce468daa05b5e3c207e9ec 2024-12-11T02:26:16,900 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/9853472c21ce468daa05b5e3c207e9ec, entries=150, sequenceid=215, filesize=11.9 K 2024-12-11T02:26:16,901 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 422539d3733f091ff661b5e7e0fc5956 in 554ms, sequenceid=215, compaction requested=true 2024-12-11T02:26:16,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:16,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:16,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-11T02:26:16,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-11T02:26:16,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-11T02:26:16,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1780 sec 2024-12-11T02:26:16,909 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.1850 sec 2024-12-11T02:26:17,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:17,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:26:17,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:17,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:17,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:17,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:17,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:17,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:17,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/7938759c0dd4448785bbb5cb3b768d8c is 50, key is test_row_0/A:col10/1733883976432/Put/seqid=0 2024-12-11T02:26:17,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884037069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884037070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884037071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884037075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884037076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741884_1060 (size=12151) 2024-12-11T02:26:17,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884037177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884037178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884037180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884037182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884037182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884037386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884037386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884037388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884037388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884037385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/7938759c0dd4448785bbb5cb3b768d8c 2024-12-11T02:26:17,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b5ad281445f34ae8acb7742fd0ebfded is 50, key is test_row_0/B:col10/1733883976432/Put/seqid=0 2024-12-11T02:26:17,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741885_1061 (size=12151) 2024-12-11T02:26:17,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b5ad281445f34ae8acb7742fd0ebfded 2024-12-11T02:26:17,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/b4f8a5b8e4fa44dda74443df6ed59b55 is 50, key is test_row_0/C:col10/1733883976432/Put/seqid=0 2024-12-11T02:26:17,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741886_1062 (size=12151) 2024-12-11T02:26:17,611 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/b4f8a5b8e4fa44dda74443df6ed59b55 2024-12-11T02:26:17,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/7938759c0dd4448785bbb5cb3b768d8c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7938759c0dd4448785bbb5cb3b768d8c 2024-12-11T02:26:17,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7938759c0dd4448785bbb5cb3b768d8c, entries=150, sequenceid=240, filesize=11.9 K 2024-12-11T02:26:17,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b5ad281445f34ae8acb7742fd0ebfded as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b5ad281445f34ae8acb7742fd0ebfded 2024-12-11T02:26:17,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b5ad281445f34ae8acb7742fd0ebfded, entries=150, sequenceid=240, filesize=11.9 K 2024-12-11T02:26:17,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/b4f8a5b8e4fa44dda74443df6ed59b55 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b4f8a5b8e4fa44dda74443df6ed59b55 2024-12-11T02:26:17,648 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b4f8a5b8e4fa44dda74443df6ed59b55, entries=150, sequenceid=240, filesize=11.9 K 2024-12-11T02:26:17,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 422539d3733f091ff661b5e7e0fc5956 in 594ms, sequenceid=240, compaction requested=true 2024-12-11T02:26:17,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:17,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:17,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-12-11T02:26:17,650 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:17,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:17,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:17,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:17,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T02:26:17,651 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:17,653 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:17,653 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:17,653 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:17,653 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/0df88f1bca1c47fcb5cb2677f4ebe899, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/68aaaa7b118f46829131c9f7c4ab9001, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b314120db62b428abce5f47870cdc0ea, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b5ad281445f34ae8acb7742fd0ebfded] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=47.9 K 2024-12-11T02:26:17,653 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:17,654 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:17,654 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:17,654 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9b817c1730af4c1a820eec9a5d0cdf7b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b86a9c26315d4f01bc7d48cfdea7bc6e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/215dc3d983cf4192987e56d812bb1e08, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7938759c0dd4448785bbb5cb3b768d8c] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=47.9 K 2024-12-11T02:26:17,654 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0df88f1bca1c47fcb5cb2677f4ebe899, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733883975060 2024-12-11T02:26:17,654 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b817c1730af4c1a820eec9a5d0cdf7b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733883975060 2024-12-11T02:26:17,654 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 68aaaa7b118f46829131c9f7c4ab9001, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, 
earliestPutTs=1733883975394 2024-12-11T02:26:17,655 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting b86a9c26315d4f01bc7d48cfdea7bc6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733883975394 2024-12-11T02:26:17,655 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b314120db62b428abce5f47870cdc0ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733883975755 2024-12-11T02:26:17,655 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 215dc3d983cf4192987e56d812bb1e08, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733883975755 2024-12-11T02:26:17,655 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b5ad281445f34ae8acb7742fd0ebfded, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733883976432 2024-12-11T02:26:17,656 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7938759c0dd4448785bbb5cb3b768d8c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733883976432 2024-12-11T02:26:17,676 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#48 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:17,677 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/4c9d1223076549a98b5f90cfa40bacb6 is 50, key is test_row_0/B:col10/1733883976432/Put/seqid=0 2024-12-11T02:26:17,681 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#49 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:17,682 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/fdb22b1470be4be2a6a8b79f12f992ac is 50, key is test_row_0/A:col10/1733883976432/Put/seqid=0 2024-12-11T02:26:17,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:17,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T02:26:17,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:17,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:17,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:17,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:17,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:17,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:17,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741887_1063 (size=12697) 2024-12-11T02:26:17,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741888_1064 (size=12697) 2024-12-11T02:26:17,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/4372c41fa36e469e98ba8d5197a1aa66 is 50, key is test_row_0/A:col10/1733883977695/Put/seqid=0 2024-12-11T02:26:17,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741889_1065 (size=12151) 2024-12-11T02:26:17,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884037734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/4372c41fa36e469e98ba8d5197a1aa66 2024-12-11T02:26:17,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884037738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884037740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884037740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884037743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/8b8f823b8447448389e1665cca132a85 is 50, key is test_row_0/B:col10/1733883977695/Put/seqid=0 2024-12-11T02:26:17,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741890_1066 (size=12151) 2024-12-11T02:26:17,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/8b8f823b8447448389e1665cca132a85 2024-12-11T02:26:17,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/02e5da6f34b244618664fe086acea155 is 50, key is test_row_0/C:col10/1733883977695/Put/seqid=0 2024-12-11T02:26:17,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T02:26:17,833 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-11T02:26:17,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-11T02:26:17,837 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T02:26:17,839 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:17,839 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:17,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741891_1067 (size=12151) 2024-12-11T02:26:17,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/02e5da6f34b244618664fe086acea155 2024-12-11T02:26:17,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884037846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884037848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/4372c41fa36e469e98ba8d5197a1aa66 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4372c41fa36e469e98ba8d5197a1aa66 2024-12-11T02:26:17,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884037849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884037850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:17,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884037850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4372c41fa36e469e98ba8d5197a1aa66, entries=150, sequenceid=254, filesize=11.9 K 2024-12-11T02:26:17,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/8b8f823b8447448389e1665cca132a85 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8b8f823b8447448389e1665cca132a85 2024-12-11T02:26:17,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8b8f823b8447448389e1665cca132a85, entries=150, sequenceid=254, filesize=11.9 K 2024-12-11T02:26:17,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/02e5da6f34b244618664fe086acea155 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/02e5da6f34b244618664fe086acea155 2024-12-11T02:26:17,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/02e5da6f34b244618664fe086acea155, entries=150, sequenceid=254, filesize=11.9 K 2024-12-11T02:26:17,883 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 422539d3733f091ff661b5e7e0fc5956 in 185ms, sequenceid=254, 
compaction requested=true 2024-12-11T02:26:17,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:17,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:17,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T02:26:17,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:17,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-11T02:26:17,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:17,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-11T02:26:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T02:26:17,992 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:17,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-11T02:26:17,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:17,993 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T02:26:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:17,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:18,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/699004d0b12d43d4bffd97ffb5a283cb is 50, key is test_row_0/A:col10/1733883977736/Put/seqid=0 2024-12-11T02:26:18,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741892_1068 (size=12301) 2024-12-11T02:26:18,040 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/699004d0b12d43d4bffd97ffb5a283cb 2024-12-11T02:26:18,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:18,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:18,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/9f9848ff18f34e2285315167c5766b9b is 50, key is test_row_0/B:col10/1733883977736/Put/seqid=0 2024-12-11T02:26:18,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884038070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884038071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884038074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884038075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884038075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741893_1069 (size=12301) 2024-12-11T02:26:18,111 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/4c9d1223076549a98b5f90cfa40bacb6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4c9d1223076549a98b5f90cfa40bacb6 2024-12-11T02:26:18,125 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/fdb22b1470be4be2a6a8b79f12f992ac as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/fdb22b1470be4be2a6a8b79f12f992ac 2024-12-11T02:26:18,135 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into fdb22b1470be4be2a6a8b79f12f992ac(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:18,135 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into 4c9d1223076549a98b5f90cfa40bacb6(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:18,135 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:18,135 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:18,136 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=12, startTime=1733883977650; duration=0sec 2024-12-11T02:26:18,136 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=12, startTime=1733883977651; duration=0sec 2024-12-11T02:26:18,136 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-11T02:26:18,136 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-11T02:26:18,136 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:18,136 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:18,136 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:18,136 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:26:18,136 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:18,138 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61165 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:26:18,138 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:18,138 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:18,138 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 5 compacting, 0 eligible, 16 blocking 2024-12-11T02:26:18,139 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-11T02:26:18,139 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-11T02:26:18,139 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. because compaction request was cancelled 2024-12-11T02:26:18,139 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:18,139 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/da6a5615cae04b9f9d645ebdbc5fdc97, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3ce40ce569324bd8a72fd9ae1421d007, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/9853472c21ce468daa05b5e3c207e9ec, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b4f8a5b8e4fa44dda74443df6ed59b55, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/02e5da6f34b244618664fe086acea155] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=59.7 K 2024-12-11T02:26:18,139 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-11T02:26:18,140 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting da6a5615cae04b9f9d645ebdbc5fdc97, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733883975060 2024-12-11T02:26:18,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T02:26:18,141 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ce40ce569324bd8a72fd9ae1421d007, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733883975394 2024-12-11T02:26:18,143 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-11T02:26:18,143 DEBUG 
[RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-11T02:26:18,143 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. because compaction request was cancelled 2024-12-11T02:26:18,143 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:18,143 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-11T02:26:18,143 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9853472c21ce468daa05b5e3c207e9ec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733883975755 2024-12-11T02:26:18,144 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4f8a5b8e4fa44dda74443df6ed59b55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733883976432 2024-12-11T02:26:18,144 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02e5da6f34b244618664fe086acea155, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733883977064 2024-12-11T02:26:18,147 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-11T02:26:18,147 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-11T02:26:18,148 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. because compaction request was cancelled 2024-12-11T02:26:18,148 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:18,167 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#55 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:18,169 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/6913f90924104a5aa1e1e01a9f123277 is 50, key is test_row_0/C:col10/1733883977695/Put/seqid=0 2024-12-11T02:26:18,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884038177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884038178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884038180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884038181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884038182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741894_1070 (size=12731) 2024-12-11T02:26:18,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884038382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884038386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884038390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884038390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884038390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T02:26:18,490 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/9f9848ff18f34e2285315167c5766b9b 2024-12-11T02:26:18,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/619528ab4f504ba4b1f1becf386f6276 is 50, key is test_row_0/C:col10/1733883977736/Put/seqid=0 2024-12-11T02:26:18,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741895_1071 (size=12301) 2024-12-11T02:26:18,522 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/619528ab4f504ba4b1f1becf386f6276 2024-12-11T02:26:18,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/699004d0b12d43d4bffd97ffb5a283cb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/699004d0b12d43d4bffd97ffb5a283cb 2024-12-11T02:26:18,538 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/699004d0b12d43d4bffd97ffb5a283cb, entries=150, sequenceid=276, filesize=12.0 K 2024-12-11T02:26:18,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/9f9848ff18f34e2285315167c5766b9b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/9f9848ff18f34e2285315167c5766b9b 2024-12-11T02:26:18,548 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/9f9848ff18f34e2285315167c5766b9b, entries=150, sequenceid=276, filesize=12.0 K 2024-12-11T02:26:18,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/619528ab4f504ba4b1f1becf386f6276 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/619528ab4f504ba4b1f1becf386f6276 2024-12-11T02:26:18,563 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/619528ab4f504ba4b1f1becf386f6276, entries=150, sequenceid=276, filesize=12.0 K 2024-12-11T02:26:18,565 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 422539d3733f091ff661b5e7e0fc5956 in 572ms, sequenceid=276, compaction requested=true 2024-12-11T02:26:18,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:18,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:18,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-11T02:26:18,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-11T02:26:18,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-11T02:26:18,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 729 msec 2024-12-11T02:26:18,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 737 msec 2024-12-11T02:26:18,613 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/6913f90924104a5aa1e1e01a9f123277 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6913f90924104a5aa1e1e01a9f123277 2024-12-11T02:26:18,623 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 6913f90924104a5aa1e1e01a9f123277(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:18,623 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:18,623 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=11, startTime=1733883977883; duration=0sec 2024-12-11T02:26:18,623 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:18,623 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:18,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:18,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-11T02:26:18,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:18,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:18,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:18,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:18,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:18,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:18,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/0fe8b0e0806144958ef147b0d1f6c455 is 50, key is test_row_0/A:col10/1733883978689/Put/seqid=0 2024-12-11T02:26:18,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741896_1072 (size=12301) 2024-12-11T02:26:18,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/0fe8b0e0806144958ef147b0d1f6c455 2024-12-11T02:26:18,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/225bb268542e4d85847a572d6d0eacff is 50, key is test_row_0/B:col10/1733883978689/Put/seqid=0 2024-12-11T02:26:18,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884038743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884038745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884038745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884038746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884038747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741897_1073 (size=12301) 2024-12-11T02:26:18,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884038849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884038850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884038853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884038853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:18,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884038855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T02:26:18,945 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-11T02:26:18,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-11T02:26:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T02:26:18,965 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:18,966 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:18,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:19,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884039054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884039059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884039060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884039063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884039064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T02:26:19,122 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-11T02:26:19,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:19,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,123 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:19,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:19,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/225bb268542e4d85847a572d6d0eacff 2024-12-11T02:26:19,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/62002c025f8340fdb6e34658e3e6efe4 is 50, key is test_row_0/C:col10/1733883978689/Put/seqid=0 2024-12-11T02:26:19,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741898_1074 (size=12301) 2024-12-11T02:26:19,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T02:26:19,282 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-11T02:26:19,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:19,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:19,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:19,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:19,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884039362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884039364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884039366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884039373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884039375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,438 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-11T02:26:19,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:19,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:19,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:19,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:19,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T02:26:19,596 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-11T02:26:19,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:19,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:19,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:19,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:19,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/62002c025f8340fdb6e34658e3e6efe4 2024-12-11T02:26:19,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/0fe8b0e0806144958ef147b0d1f6c455 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0fe8b0e0806144958ef147b0d1f6c455 2024-12-11T02:26:19,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0fe8b0e0806144958ef147b0d1f6c455, entries=150, sequenceid=296, filesize=12.0 K 2024-12-11T02:26:19,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/225bb268542e4d85847a572d6d0eacff as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/225bb268542e4d85847a572d6d0eacff 2024-12-11T02:26:19,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/225bb268542e4d85847a572d6d0eacff, entries=150, sequenceid=296, filesize=12.0 K 2024-12-11T02:26:19,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/62002c025f8340fdb6e34658e3e6efe4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/62002c025f8340fdb6e34658e3e6efe4 2024-12-11T02:26:19,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/62002c025f8340fdb6e34658e3e6efe4, entries=150, sequenceid=296, filesize=12.0 K 2024-12-11T02:26:19,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 422539d3733f091ff661b5e7e0fc5956 in 954ms, sequenceid=296, compaction requested=true 2024-12-11T02:26:19,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:19,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:19,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:19,646 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:19,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:19,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:19,646 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:19,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:19,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:19,648 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:19,648 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:19,648 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:19,648 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:19,648 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,648 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:19,648 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/fdb22b1470be4be2a6a8b79f12f992ac, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4372c41fa36e469e98ba8d5197a1aa66, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/699004d0b12d43d4bffd97ffb5a283cb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0fe8b0e0806144958ef147b0d1f6c455] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=48.3 K 2024-12-11T02:26:19,648 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4c9d1223076549a98b5f90cfa40bacb6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8b8f823b8447448389e1665cca132a85, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/9f9848ff18f34e2285315167c5766b9b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/225bb268542e4d85847a572d6d0eacff] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=48.3 K 2024-12-11T02:26:19,649 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c9d1223076549a98b5f90cfa40bacb6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733883976432 2024-12-11T02:26:19,649 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdb22b1470be4be2a6a8b79f12f992ac, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733883976432 2024-12-11T02:26:19,650 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b8f823b8447448389e1665cca132a85, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733883977064 2024-12-11T02:26:19,650 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4372c41fa36e469e98ba8d5197a1aa66, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733883977064 2024-12-11T02:26:19,650 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f9848ff18f34e2285315167c5766b9b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733883977736 2024-12-11T02:26:19,650 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
699004d0b12d43d4bffd97ffb5a283cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733883977736 2024-12-11T02:26:19,651 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 225bb268542e4d85847a572d6d0eacff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733883978070 2024-12-11T02:26:19,651 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fe8b0e0806144958ef147b0d1f6c455, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733883978070 2024-12-11T02:26:19,665 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#60 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:19,666 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/0685798a1fa34aa394e0ac195a2df046 is 50, key is test_row_0/A:col10/1733883978689/Put/seqid=0 2024-12-11T02:26:19,674 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#61 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:19,675 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/6c997fa72f08456db0a3a58d289fbb89 is 50, key is test_row_0/B:col10/1733883978689/Put/seqid=0 2024-12-11T02:26:19,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741899_1075 (size=12983) 2024-12-11T02:26:19,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741900_1076 (size=12983) 2024-12-11T02:26:19,707 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/6c997fa72f08456db0a3a58d289fbb89 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6c997fa72f08456db0a3a58d289fbb89 2024-12-11T02:26:19,715 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into 6c997fa72f08456db0a3a58d289fbb89(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:19,715 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:19,715 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=12, startTime=1733883979646; duration=0sec 2024-12-11T02:26:19,716 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:19,716 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:19,716 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:19,718 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:19,718 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:19,718 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,719 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6913f90924104a5aa1e1e01a9f123277, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/619528ab4f504ba4b1f1becf386f6276, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/62002c025f8340fdb6e34658e3e6efe4] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=36.5 K 2024-12-11T02:26:19,720 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6913f90924104a5aa1e1e01a9f123277, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733883977064 2024-12-11T02:26:19,724 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 619528ab4f504ba4b1f1becf386f6276, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733883977736 2024-12-11T02:26:19,726 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 62002c025f8340fdb6e34658e3e6efe4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733883978070 2024-12-11T02:26:19,740 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
422539d3733f091ff661b5e7e0fc5956#C#compaction#62 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:19,741 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/c5ae27af21c04545a4139e0a21fa7193 is 50, key is test_row_0/C:col10/1733883978689/Put/seqid=0 2024-12-11T02:26:19,750 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-11T02:26:19,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:19,751 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-11T02:26:19,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:19,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:19,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:19,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:19,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:19,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:19,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/23cafecaf6e74d92b6196dde10ab02f0 is 50, key is test_row_0/A:col10/1733883978716/Put/seqid=0 2024-12-11T02:26:19,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741901_1077 (size=12983) 2024-12-11T02:26:19,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741902_1078 (size=12301) 2024-12-11T02:26:19,775 INFO 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/23cafecaf6e74d92b6196dde10ab02f0 2024-12-11T02:26:19,779 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/c5ae27af21c04545a4139e0a21fa7193 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/c5ae27af21c04545a4139e0a21fa7193 2024-12-11T02:26:19,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/cb1ffb1b19464146af2b61b6cf8922a3 is 50, key is test_row_0/B:col10/1733883978716/Put/seqid=0 2024-12-11T02:26:19,794 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into c5ae27af21c04545a4139e0a21fa7193(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:19,794 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:19,794 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883979646; duration=0sec 2024-12-11T02:26:19,794 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:19,794 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:19,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741903_1079 (size=12301) 2024-12-11T02:26:19,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:19,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:19,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884039885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884039888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884039885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884039888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884039888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884039992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884039994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884039994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884039995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:19,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:19,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884039995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T02:26:20,094 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/0685798a1fa34aa394e0ac195a2df046 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0685798a1fa34aa394e0ac195a2df046 2024-12-11T02:26:20,106 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 0685798a1fa34aa394e0ac195a2df046(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
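Sketch (not part of the captured log): the repeated RegionTooBusyException entries above record writers being rejected while the region's memstore sits over its 512.0 K blocking limit, until the in-flight flush drains it. Below is a minimal client-side sketch of how a writer might back off on that exception; the table name, row key, and column family/qualifier are taken from the rows seen in this log, while the retry loop, sleep values, and payload are assumptions, and the stock HBase client will normally retry internally before this exception ever reaches application code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                      // assumed starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                        // may be rejected while the memstore is over the blocking limit
          break;                                 // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);               // give the flush shown in this log time to catch up
          backoffMs *= 2;                        // exponential backoff before retrying
        }
      }
    }
  }
}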
2024-12-11T02:26:20,106 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:20,106 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=12, startTime=1733883979646; duration=0sec 2024-12-11T02:26:20,107 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:20,107 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:20,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884040197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884040200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884040200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884040204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,210 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/cb1ffb1b19464146af2b61b6cf8922a3 2024-12-11T02:26:20,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884040205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/def099453b7647e1850ba2373a63180b is 50, key is test_row_0/C:col10/1733883978716/Put/seqid=0 2024-12-11T02:26:20,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741904_1080 (size=12301) 2024-12-11T02:26:20,234 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/def099453b7647e1850ba2373a63180b 2024-12-11T02:26:20,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/23cafecaf6e74d92b6196dde10ab02f0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/23cafecaf6e74d92b6196dde10ab02f0 2024-12-11T02:26:20,251 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/23cafecaf6e74d92b6196dde10ab02f0, entries=150, sequenceid=317, filesize=12.0 K 2024-12-11T02:26:20,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/cb1ffb1b19464146af2b61b6cf8922a3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb1ffb1b19464146af2b61b6cf8922a3 2024-12-11T02:26:20,262 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 
{event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb1ffb1b19464146af2b61b6cf8922a3, entries=150, sequenceid=317, filesize=12.0 K 2024-12-11T02:26:20,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/def099453b7647e1850ba2373a63180b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/def099453b7647e1850ba2373a63180b 2024-12-11T02:26:20,279 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/def099453b7647e1850ba2373a63180b, entries=150, sequenceid=317, filesize=12.0 K 2024-12-11T02:26:20,282 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 422539d3733f091ff661b5e7e0fc5956 in 531ms, sequenceid=317, compaction requested=false 2024-12-11T02:26:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
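Sketch (not part of the captured log): the "Over memstore limit=512.0 K" figure in the rejections above is the region's blocking memstore size, which HRegion.checkResources derives from the configured flush size times hbase.hregion.memstore.block.multiplier; the ~114.05 KB flush that just completed is what lets writes through again. The arithmetic, assuming a 128 K flush size for this test and the default multiplier of 4 (both assumed values, not read from this log):

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    long flushSizeBytes = 128L * 1024;   // assumed hbase.hregion.memstore.flush.size for this test
    int blockMultiplier = 4;             // hbase.hregion.memstore.block.multiplier default
    long blockingLimit = flushSizeBytes * blockMultiplier;
    // 524288 bytes = 512.0 K, matching the RegionTooBusyException messages above
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}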
2024-12-11T02:26:20,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-11T02:26:20,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-11T02:26:20,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-11T02:26:20,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3170 sec 2024-12-11T02:26:20,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.3260 sec 2024-12-11T02:26:20,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:20,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T02:26:20,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:20,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:20,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:20,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:20,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:20,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:20,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/764f2ff017f242efb2fa6d9fff26dae2 is 50, key is test_row_0/A:col10/1733883979876/Put/seqid=0 2024-12-11T02:26:20,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884040535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884040536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884040540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884040541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884040542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741905_1081 (size=12301) 2024-12-11T02:26:20,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/764f2ff017f242efb2fa6d9fff26dae2 2024-12-11T02:26:20,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/d5a491ad5fc642e69ca838cd279f587b is 50, key is test_row_0/B:col10/1733883979876/Put/seqid=0 2024-12-11T02:26:20,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741906_1082 (size=12301) 2024-12-11T02:26:20,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/d5a491ad5fc642e69ca838cd279f587b 2024-12-11T02:26:20,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/24fbf2c100d444509b3ec246c39f6eba is 50, key is test_row_0/C:col10/1733883979876/Put/seqid=0 2024-12-11T02:26:20,603 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741907_1083 (size=12301) 2024-12-11T02:26:20,604 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/24fbf2c100d444509b3ec246c39f6eba 2024-12-11T02:26:20,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/764f2ff017f242efb2fa6d9fff26dae2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/764f2ff017f242efb2fa6d9fff26dae2 2024-12-11T02:26:20,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/764f2ff017f242efb2fa6d9fff26dae2, entries=150, sequenceid=338, filesize=12.0 K 2024-12-11T02:26:20,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/d5a491ad5fc642e69ca838cd279f587b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d5a491ad5fc642e69ca838cd279f587b 2024-12-11T02:26:20,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d5a491ad5fc642e69ca838cd279f587b, entries=150, sequenceid=338, filesize=12.0 K 2024-12-11T02:26:20,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/24fbf2c100d444509b3ec246c39f6eba as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/24fbf2c100d444509b3ec246c39f6eba 2024-12-11T02:26:20,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/24fbf2c100d444509b3ec246c39f6eba, entries=150, sequenceid=338, filesize=12.0 K 2024-12-11T02:26:20,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 422539d3733f091ff661b5e7e0fc5956 in 143ms, sequenceid=338, compaction requested=true 2024-12-11T02:26:20,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:20,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:20,649 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:20,649 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:20,649 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:20,651 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:20,651 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:20,651 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:20,651 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:20,651 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:20,651 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0685798a1fa34aa394e0ac195a2df046, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/23cafecaf6e74d92b6196dde10ab02f0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/764f2ff017f242efb2fa6d9fff26dae2] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=36.7 K 2024-12-11T02:26:20,651 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:20,651 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6c997fa72f08456db0a3a58d289fbb89, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb1ffb1b19464146af2b61b6cf8922a3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d5a491ad5fc642e69ca838cd279f587b] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=36.7 K 2024-12-11T02:26:20,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:20,652 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0685798a1fa34aa394e0ac195a2df046, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733883978070 2024-12-11T02:26:20,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:20,652 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c997fa72f08456db0a3a58d289fbb89, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733883978070 2024-12-11T02:26:20,652 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23cafecaf6e74d92b6196dde10ab02f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733883978716 2024-12-11T02:26:20,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:20,653 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 764f2ff017f242efb2fa6d9fff26dae2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733883979876 2024-12-11T02:26:20,653 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting cb1ffb1b19464146af2b61b6cf8922a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733883978716 2024-12-11T02:26:20,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:20,654 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d5a491ad5fc642e69ca838cd279f587b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733883979876 2024-12-11T02:26:20,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:20,658 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-11T02:26:20,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:20,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:20,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:20,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:20,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:20,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:20,674 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#69 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:20,675 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#70 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:20,675 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/7f783c895d3b40e4b31e951514709b64 is 50, key is test_row_0/A:col10/1733883979876/Put/seqid=0 2024-12-11T02:26:20,675 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/afc07a5fecd642d5ba6170438bc24218 is 50, key is test_row_0/B:col10/1733883979876/Put/seqid=0 2024-12-11T02:26:20,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/2075bf5cc8a54469a6eaca5942961d38 is 50, key is test_row_0/A:col10/1733883980540/Put/seqid=0 2024-12-11T02:26:20,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884040683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884040683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884040687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884040688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884040688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741908_1084 (size=13085) 2024-12-11T02:26:20,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741909_1085 (size=13085) 2024-12-11T02:26:20,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741910_1086 (size=12301) 2024-12-11T02:26:20,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884040791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884040790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884040794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884040795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:20,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:20,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884040796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884040996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884040998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884040996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884041000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884041000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T02:26:21,072 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-11T02:26:21,074 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:21,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-11T02:26:21,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T02:26:21,077 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:21,078 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:21,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:21,133 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/7f783c895d3b40e4b31e951514709b64 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7f783c895d3b40e4b31e951514709b64 2024-12-11T02:26:21,141 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 7f783c895d3b40e4b31e951514709b64(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:21,141 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:21,141 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=13, startTime=1733883980649; duration=0sec 2024-12-11T02:26:21,141 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:21,142 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:21,142 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:21,143 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:21,143 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:21,144 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:21,144 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/c5ae27af21c04545a4139e0a21fa7193, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/def099453b7647e1850ba2373a63180b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/24fbf2c100d444509b3ec246c39f6eba] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=36.7 K 2024-12-11T02:26:21,144 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5ae27af21c04545a4139e0a21fa7193, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733883978070 2024-12-11T02:26:21,145 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting def099453b7647e1850ba2373a63180b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733883978716 2024-12-11T02:26:21,146 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24fbf2c100d444509b3ec246c39f6eba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733883979876 2024-12-11T02:26:21,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/2075bf5cc8a54469a6eaca5942961d38 2024-12-11T02:26:21,160 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#72 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:21,164 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/afc07a5fecd642d5ba6170438bc24218 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/afc07a5fecd642d5ba6170438bc24218 2024-12-11T02:26:21,164 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/274f85fecec24b3baacee988f12904b1 is 50, key is test_row_0/C:col10/1733883979876/Put/seqid=0 2024-12-11T02:26:21,173 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into afc07a5fecd642d5ba6170438bc24218(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:21,173 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:21,173 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=13, startTime=1733883980649; duration=0sec 2024-12-11T02:26:21,174 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:21,174 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:21,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T02:26:21,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/f092e29194564f7380772d77feb76ab4 is 50, key is test_row_0/B:col10/1733883980540/Put/seqid=0 2024-12-11T02:26:21,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741911_1087 (size=13085) 2024-12-11T02:26:21,213 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/274f85fecec24b3baacee988f12904b1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/274f85fecec24b3baacee988f12904b1 2024-12-11T02:26:21,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741912_1088 (size=12301) 2024-12-11T02:26:21,225 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 274f85fecec24b3baacee988f12904b1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:21,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/f092e29194564f7380772d77feb76ab4 2024-12-11T02:26:21,225 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:21,225 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883980652; duration=0sec 2024-12-11T02:26:21,226 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:21,226 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:21,230 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:21,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:21,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:21,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/0e832a919c4841ae81939ad372e65975 is 50, key is test_row_0/C:col10/1733883980540/Put/seqid=0 2024-12-11T02:26:21,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741913_1089 (size=12301) 2024-12-11T02:26:21,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/0e832a919c4841ae81939ad372e65975 2024-12-11T02:26:21,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/2075bf5cc8a54469a6eaca5942961d38 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2075bf5cc8a54469a6eaca5942961d38 2024-12-11T02:26:21,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2075bf5cc8a54469a6eaca5942961d38, entries=150, sequenceid=359, filesize=12.0 K 2024-12-11T02:26:21,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/f092e29194564f7380772d77feb76ab4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f092e29194564f7380772d77feb76ab4 2024-12-11T02:26:21,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f092e29194564f7380772d77feb76ab4, entries=150, sequenceid=359, filesize=12.0 K 2024-12-11T02:26:21,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/0e832a919c4841ae81939ad372e65975 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0e832a919c4841ae81939ad372e65975 2024-12-11T02:26:21,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884041302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884041303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884041304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884041304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0e832a919c4841ae81939ad372e65975, entries=150, sequenceid=359, filesize=12.0 K 2024-12-11T02:26:21,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=93.93 KB/96180 for 422539d3733f091ff661b5e7e0fc5956 in 649ms, sequenceid=359, compaction requested=false 2024-12-11T02:26:21,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:21,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:21,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T02:26:21,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:21,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:21,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:21,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:21,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING 
TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:21,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:21,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/2d3d78e8467a41b986f8c76039002120 is 50, key is test_row_0/A:col10/1733883981305/Put/seqid=0 2024-12-11T02:26:21,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741914_1090 (size=12301) 2024-12-11T02:26:21,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/2d3d78e8467a41b986f8c76039002120 2024-12-11T02:26:21,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884041361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/f64cb4a72e884849924b4a56363963a3 is 50, key is test_row_0/B:col10/1733883981305/Put/seqid=0 2024-12-11T02:26:21,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741915_1091 (size=12301) 2024-12-11T02:26:21,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/f64cb4a72e884849924b4a56363963a3 2024-12-11T02:26:21,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T02:26:21,386 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:21,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:21,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
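
The repeated RegionTooBusyException entries above mean the region is rejecting writes while its memstore sits over the blocking limit (512.0 K here, which points at a deliberately tiny flush size in this test; in a stock deployment the blocking limit is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier). The exception is retryable: writers are expected to back off while MemStoreFlusher catches up. A minimal client-side sketch of that pattern, assuming the standard HBase 2.x client API and the table/row/family names that appear in this log; the retry counts and pauses are illustrative, not values taken from this test run:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Let the built-in retrying caller absorb transient RegionTooBusyException bursts
        // (values illustrative, not taken from this test run).
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            table.put(put);
          } catch (IOException e) {
            // Only reached once client retries are exhausted; RegionTooBusyException is
            // typically the cause. Back off once more and retry the same mutation.
            Thread.sleep(500);
            table.put(put);
          }
        }
      }
    }

In steady state the retrying caller hides these rejections from the application; the explicit catch is only a last-resort fallback.
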
2024-12-11T02:26:21,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/3c5eca28920d4389a7ebd8c1046ba770 is 50, key is test_row_0/C:col10/1733883981305/Put/seqid=0 2024-12-11T02:26:21,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741916_1092 (size=12301) 2024-12-11T02:26:21,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/3c5eca28920d4389a7ebd8c1046ba770 2024-12-11T02:26:21,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/2d3d78e8467a41b986f8c76039002120 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2d3d78e8467a41b986f8c76039002120 2024-12-11T02:26:21,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2d3d78e8467a41b986f8c76039002120, entries=150, sequenceid=380, filesize=12.0 K 2024-12-11T02:26:21,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/f64cb4a72e884849924b4a56363963a3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f64cb4a72e884849924b4a56363963a3 2024-12-11T02:26:21,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f64cb4a72e884849924b4a56363963a3, entries=150, sequenceid=380, filesize=12.0 K 2024-12-11T02:26:21,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/3c5eca28920d4389a7ebd8c1046ba770 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3c5eca28920d4389a7ebd8c1046ba770 2024-12-11T02:26:21,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3c5eca28920d4389a7ebd8c1046ba770, entries=150, sequenceid=380, filesize=12.0 K 2024-12-11T02:26:21,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 422539d3733f091ff661b5e7e0fc5956 in 143ms, sequenceid=380, compaction requested=true 2024-12-11T02:26:21,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:21,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:21,457 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:21,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:21,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:21,457 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:21,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:21,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:21,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:21,458 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:21,459 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:21,459 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,459 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:21,459 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7f783c895d3b40e4b31e951514709b64, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2075bf5cc8a54469a6eaca5942961d38, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2d3d78e8467a41b986f8c76039002120] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=36.8 K 2024-12-11T02:26:21,459 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:21,459 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
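
The two "Exploring compaction algorithm has selected 3 files of size 37687" lines above show ExploringCompactionPolicy choosing all three eligible store files (12.8 K + 12.0 K + 12.0 K, i.e. 37687 bytes or about 36.8 K) for minor compactions of stores A and B; "16 blocking" is the store-file count at which writes would be blocked. The selection is bounded by a handful of settings; a hedged sketch of the relevant keys, with illustrative values rather than the ones this test uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // ExploringCompactionPolicy searches permutations of eligible files and keeps the
        // cheapest set that satisfies the ratio check; these keys bound that search
        // (values shown are illustrative, not this test's configuration).
        conf.setInt("hbase.hstore.compaction.min", 3);        // minimum files per compaction
        conf.setInt("hbase.hstore.compaction.max", 10);       // maximum files per compaction
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio for file eligibility
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" in the log
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }
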
2024-12-11T02:26:21,459 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/afc07a5fecd642d5ba6170438bc24218, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f092e29194564f7380772d77feb76ab4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f64cb4a72e884849924b4a56363963a3] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=36.8 K 2024-12-11T02:26:21,459 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f783c895d3b40e4b31e951514709b64, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733883979876 2024-12-11T02:26:21,461 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting afc07a5fecd642d5ba6170438bc24218, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733883979876 2024-12-11T02:26:21,461 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f092e29194564f7380772d77feb76ab4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733883980540 2024-12-11T02:26:21,462 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f64cb4a72e884849924b4a56363963a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1733883980686 2024-12-11T02:26:21,463 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2075bf5cc8a54469a6eaca5942961d38, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733883980540 2024-12-11T02:26:21,464 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d3d78e8467a41b986f8c76039002120, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1733883980686 2024-12-11T02:26:21,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:21,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-11T02:26:21,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:21,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:21,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:21,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:21,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 
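
The "FLUSHING TO DISK" and "Swapping pipeline suffix" messages come from CompactingMemStore, so the A/B/C families are running with in-memory compaction: mutations land in an active segment, immutable segments queue up in a pipeline, and the pipeline is swapped out when the region flushes. A sketch of how a table can be declared with that policy (table and family names taken from this log; choosing BASIC is an assumption, the test may configure a different policy):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          for (String cf : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
                    // CompactingMemStore keeps immutable segments in a pipeline; the
                    // "Swapping pipeline suffix" messages above are that pipeline being
                    // swapped as segments are handed off to the flush.
                    .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }
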
2024-12-11T02:26:21,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:21,486 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#78 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:21,487 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b495fd5b643c49d38ca24ab555736503 is 50, key is test_row_0/B:col10/1733883981305/Put/seqid=0 2024-12-11T02:26:21,506 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:21,507 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/141a9a5c2e044f0db8b0bbeb50658e8e is 50, key is test_row_0/A:col10/1733883981305/Put/seqid=0 2024-12-11T02:26:21,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/f90ea02a916f4489905ac190081113ae is 50, key is test_row_0/A:col10/1733883981323/Put/seqid=0 2024-12-11T02:26:21,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884041525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,542 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:21,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:21,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
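
The pid=21 failures above are benign from a durability standpoint: a master-driven flush procedure keeps dispatching FlushRegionCallable to the region server, the region replies "NOT flushing ... as already flushing", the callable reports an IOException back to the master, and the procedure is retried until a flush slot frees up. The kind of call that drives this is an admin-requested flush, sketched below with the standard HBase 2.x Admin API; whether the test issues exactly this call is an assumption based on the procedure activity in the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table. The master dispatches
          // FlushRegionCallable to the owning region server; if that region is already
          // flushing, the callable fails (the pid=21 IOExceptions above) and is retried.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
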
2024-12-11T02:26:21,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741917_1093 (size=13187) 2024-12-11T02:26:21,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741918_1094 (size=13187) 2024-12-11T02:26:21,566 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/141a9a5c2e044f0db8b0bbeb50658e8e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/141a9a5c2e044f0db8b0bbeb50658e8e 2024-12-11T02:26:21,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741919_1095 (size=14741) 2024-12-11T02:26:21,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/f90ea02a916f4489905ac190081113ae 2024-12-11T02:26:21,583 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 141a9a5c2e044f0db8b0bbeb50658e8e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
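
Note the size arithmetic in the line above: three input files totalling 36.8 K were rewritten into a single 12.9 K file. That is expected for this workload, since every input holds 150 cells for the same test_row_* keys and versions beyond the family's retention limit are dropped during the rewrite, leaving roughly one input's worth of data. A small read-back sketch (standard HBase 2.x client API, row and family names from this log) that lists which versions remain visible after compaction:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VersionCheckSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Get get = new Get(Bytes.toBytes("test_row_0"));
          get.addFamily(Bytes.toBytes("A"));
          get.readAllVersions(); // request every version the server still retains
          Result result = table.get(get);
          for (Cell cell : result.rawCells()) {
            // After compaction, only versions within the family's retention survive.
            System.out.println(Bytes.toString(CellUtil.cloneQualifier(cell))
                + " @ " + cell.getTimestamp());
          }
        }
      }
    }
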
2024-12-11T02:26:21,583 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:21,583 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=13, startTime=1733883981457; duration=0sec 2024-12-11T02:26:21,583 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:21,583 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:21,583 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:21,585 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:21,585 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:21,586 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,586 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/274f85fecec24b3baacee988f12904b1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0e832a919c4841ae81939ad372e65975, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3c5eca28920d4389a7ebd8c1046ba770] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=36.8 K 2024-12-11T02:26:21,586 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 274f85fecec24b3baacee988f12904b1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733883979876 2024-12-11T02:26:21,587 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e832a919c4841ae81939ad372e65975, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733883980540 2024-12-11T02:26:21,587 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c5eca28920d4389a7ebd8c1046ba770, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1733883980686 2024-12-11T02:26:21,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/955cfe48ec2b4de29a2220f542b0d79a is 50, key is test_row_0/B:col10/1733883981323/Put/seqid=0 2024-12-11T02:26:21,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884041630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,634 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:21,635 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/5eb56391ec0047a1b16b3f2a6b81b9cc is 50, key is test_row_0/C:col10/1733883981305/Put/seqid=0 2024-12-11T02:26:21,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741920_1096 (size=12301) 2024-12-11T02:26:21,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/955cfe48ec2b4de29a2220f542b0d79a 2024-12-11T02:26:21,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/29c4cfef3f0742e4aab0bf13f7c4e851 is 50, key is test_row_0/C:col10/1733883981323/Put/seqid=0 2024-12-11T02:26:21,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T02:26:21,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741921_1097 (size=13187) 2024-12-11T02:26:21,696 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741922_1098 (size=12301) 2024-12-11T02:26:21,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:21,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:21,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884041807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884041808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884041809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884041809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:21,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884041836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,868 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:21,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:21,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:21,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:21,869 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:21,965 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b495fd5b643c49d38ca24ab555736503 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b495fd5b643c49d38ca24ab555736503 2024-12-11T02:26:21,975 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into b495fd5b643c49d38ca24ab555736503(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:21,975 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:21,975 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=13, startTime=1733883981457; duration=0sec 2024-12-11T02:26:21,975 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:21,976 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:22,022 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:22,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:22,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,023 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:22,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,092 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/5eb56391ec0047a1b16b3f2a6b81b9cc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/5eb56391ec0047a1b16b3f2a6b81b9cc 2024-12-11T02:26:22,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/29c4cfef3f0742e4aab0bf13f7c4e851 2024-12-11T02:26:22,104 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 5eb56391ec0047a1b16b3f2a6b81b9cc(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:22,104 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,104 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883981457; duration=0sec 2024-12-11T02:26:22,104 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:22,104 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:22,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/f90ea02a916f4489905ac190081113ae as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f90ea02a916f4489905ac190081113ae 2024-12-11T02:26:22,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f90ea02a916f4489905ac190081113ae, entries=200, sequenceid=399, filesize=14.4 K 2024-12-11T02:26:22,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/955cfe48ec2b4de29a2220f542b0d79a as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/955cfe48ec2b4de29a2220f542b0d79a 2024-12-11T02:26:22,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/955cfe48ec2b4de29a2220f542b0d79a, entries=150, sequenceid=399, filesize=12.0 K 2024-12-11T02:26:22,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/29c4cfef3f0742e4aab0bf13f7c4e851 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/29c4cfef3f0742e4aab0bf13f7c4e851 2024-12-11T02:26:22,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/29c4cfef3f0742e4aab0bf13f7c4e851, entries=150, sequenceid=399, filesize=12.0 K 2024-12-11T02:26:22,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 422539d3733f091ff661b5e7e0fc5956 in 666ms, sequenceid=399, compaction requested=false 2024-12-11T02:26:22,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:22,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T02:26:22,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:22,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:22,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:22,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/770a37dd1b874375b119ce5b5107f79c is 50, key is test_row_0/A:col10/1733883982141/Put/seqid=0 2024-12-11T02:26:22,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741923_1099 (size=12301) 2024-12-11T02:26:22,172 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/770a37dd1b874375b119ce5b5107f79c 2024-12-11T02:26:22,177 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:22,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:22,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T02:26:22,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/6adeee4da88d40f6ab40888666fc6e77 is 50, key is test_row_0/B:col10/1733883982141/Put/seqid=0 2024-12-11T02:26:22,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884042213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741924_1100 (size=12301) 2024-12-11T02:26:22,230 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/6adeee4da88d40f6ab40888666fc6e77 2024-12-11T02:26:22,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/7f10ea355fca44179e2f3c420f6b1362 is 50, key is test_row_0/C:col10/1733883982141/Put/seqid=0 2024-12-11T02:26:22,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741925_1101 (size=12301) 2024-12-11T02:26:22,273 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/7f10ea355fca44179e2f3c420f6b1362 2024-12-11T02:26:22,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/770a37dd1b874375b119ce5b5107f79c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/770a37dd1b874375b119ce5b5107f79c 2024-12-11T02:26:22,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/770a37dd1b874375b119ce5b5107f79c, entries=150, sequenceid=420, filesize=12.0 K 2024-12-11T02:26:22,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/6adeee4da88d40f6ab40888666fc6e77 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6adeee4da88d40f6ab40888666fc6e77 2024-12-11T02:26:22,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6adeee4da88d40f6ab40888666fc6e77, entries=150, sequenceid=420, filesize=12.0 K 2024-12-11T02:26:22,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/7f10ea355fca44179e2f3c420f6b1362 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/7f10ea355fca44179e2f3c420f6b1362 2024-12-11T02:26:22,310 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/7f10ea355fca44179e2f3c420f6b1362, entries=150, sequenceid=420, filesize=12.0 K 2024-12-11T02:26:22,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 422539d3733f091ff661b5e7e0fc5956 in 169ms, sequenceid=420, compaction requested=true 2024-12-11T02:26:22,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,313 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:22,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 
2024-12-11T02:26:22,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:22,314 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:22,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:22,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:22,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:22,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:22,315 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:22,316 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:22,316 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,316 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/141a9a5c2e044f0db8b0bbeb50658e8e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f90ea02a916f4489905ac190081113ae, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/770a37dd1b874375b119ce5b5107f79c] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=39.3 K 2024-12-11T02:26:22,316 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:22,316 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:22,316 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:22,316 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b495fd5b643c49d38ca24ab555736503, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/955cfe48ec2b4de29a2220f542b0d79a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6adeee4da88d40f6ab40888666fc6e77] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=36.9 K 2024-12-11T02:26:22,317 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 141a9a5c2e044f0db8b0bbeb50658e8e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1733883980686 2024-12-11T02:26:22,317 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b495fd5b643c49d38ca24ab555736503, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1733883980686 2024-12-11T02:26:22,318 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting f90ea02a916f4489905ac190081113ae, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733883981323 2024-12-11T02:26:22,318 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 955cfe48ec2b4de29a2220f542b0d79a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733883981323 2024-12-11T02:26:22,318 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6adeee4da88d40f6ab40888666fc6e77, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733883981515 2024-12-11T02:26:22,319 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 770a37dd1b874375b119ce5b5107f79c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733883981515 2024-12-11T02:26:22,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:22,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-11T02:26:22,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:22,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:22,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 
2024-12-11T02:26:22,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/761e117a9ec0408c83e1b2d6bfbfce7e is 50, key is test_row_0/A:col10/1733883982209/Put/seqid=0 2024-12-11T02:26:22,332 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:22,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:22,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,337 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#88 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:22,337 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/cb7ce8c6ad7b46a3a6e9cbca4252a455 is 50, key is test_row_0/B:col10/1733883982141/Put/seqid=0 2024-12-11T02:26:22,347 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#89 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:22,348 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/26653e8f2e9e4b6a8990a934839d7b76 is 50, key is test_row_0/A:col10/1733883982141/Put/seqid=0 2024-12-11T02:26:22,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741926_1102 (size=14741) 2024-12-11T02:26:22,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/761e117a9ec0408c83e1b2d6bfbfce7e 2024-12-11T02:26:22,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741927_1103 (size=13289) 2024-12-11T02:26:22,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741928_1104 (size=13289) 2024-12-11T02:26:22,391 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/cb7ce8c6ad7b46a3a6e9cbca4252a455 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb7ce8c6ad7b46a3a6e9cbca4252a455 2024-12-11T02:26:22,394 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/26653e8f2e9e4b6a8990a934839d7b76 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/26653e8f2e9e4b6a8990a934839d7b76 2024-12-11T02:26:22,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/00ce75f7f8fb4186845ae4754dccbe46 is 50, key is test_row_0/B:col10/1733883982209/Put/seqid=0 2024-12-11T02:26:22,406 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into cb7ce8c6ad7b46a3a6e9cbca4252a455(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:22,406 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,406 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=13, startTime=1733883982314; duration=0sec 2024-12-11T02:26:22,406 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:22,406 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:22,406 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:22,408 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:22,408 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 26653e8f2e9e4b6a8990a934839d7b76(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:22,408 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,408 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:22,408 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=13, startTime=1733883982312; duration=0sec 2024-12-11T02:26:22,408 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:22,408 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:22,408 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:22,408 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/5eb56391ec0047a1b16b3f2a6b81b9cc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/29c4cfef3f0742e4aab0bf13f7c4e851, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/7f10ea355fca44179e2f3c420f6b1362] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=36.9 K 2024-12-11T02:26:22,409 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eb56391ec0047a1b16b3f2a6b81b9cc, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1733883980686 2024-12-11T02:26:22,410 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 29c4cfef3f0742e4aab0bf13f7c4e851, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1733883981323 2024-12-11T02:26:22,410 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f10ea355fca44179e2f3c420f6b1362, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733883981515 2024-12-11T02:26:22,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741929_1105 (size=12301) 2024-12-11T02:26:22,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/00ce75f7f8fb4186845ae4754dccbe46 2024-12-11T02:26:22,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884042410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,427 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#91 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:22,428 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/6bbf63e671214843b3e5d011e6b87b42 is 50, key is test_row_0/C:col10/1733883982141/Put/seqid=0 2024-12-11T02:26:22,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/436c83b913c4467ca616baa1308deed6 is 50, key is test_row_0/C:col10/1733883982209/Put/seqid=0 2024-12-11T02:26:22,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741930_1106 (size=13289) 2024-12-11T02:26:22,462 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/6bbf63e671214843b3e5d011e6b87b42 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6bbf63e671214843b3e5d011e6b87b42 2024-12-11T02:26:22,470 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 6bbf63e671214843b3e5d011e6b87b42(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
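The RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting mutations while the region's memstore is above its blocking size. A minimal sketch of the two settings that normally determine that limit, assuming the usual formula (blocking size = flush size x block multiplier); the 128 KB flush size below is an assumption chosen only so the product matches the 512.0 K seen in the log, not a value taken from the test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // hypothetical 128 KB flush size
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier
            long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
            // Mutations that arrive while the memstore is above this size are rejected with
            // RegionTooBusyException, as in the Mutate calls logged above.
            System.out.println("blocking memstore size = " + blocking + " bytes"); // 524288 bytes = 512 K
        }
    }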
2024-12-11T02:26:22,470 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,470 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883982314; duration=0sec 2024-12-11T02:26:22,470 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:22,470 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:22,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741931_1107 (size=12301) 2024-12-11T02:26:22,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/436c83b913c4467ca616baa1308deed6 2024-12-11T02:26:22,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/761e117a9ec0408c83e1b2d6bfbfce7e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/761e117a9ec0408c83e1b2d6bfbfce7e 2024-12-11T02:26:22,486 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:22,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/761e117a9ec0408c83e1b2d6bfbfce7e, entries=200, sequenceid=439, filesize=14.4 K 2024-12-11T02:26:22,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:22,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:22,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/00ce75f7f8fb4186845ae4754dccbe46 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/00ce75f7f8fb4186845ae4754dccbe46 2024-12-11T02:26:22,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/00ce75f7f8fb4186845ae4754dccbe46, entries=150, sequenceid=439, filesize=12.0 K 2024-12-11T02:26:22,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/436c83b913c4467ca616baa1308deed6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/436c83b913c4467ca616baa1308deed6 2024-12-11T02:26:22,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/436c83b913c4467ca616baa1308deed6, entries=150, sequenceid=439, filesize=12.0 K 2024-12-11T02:26:22,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 422539d3733f091ff661b5e7e0fc5956 in 186ms, sequenceid=439, compaction requested=false 2024-12-11T02:26:22,507 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:22,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T02:26:22,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:22,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:22,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:22,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/7874325c3b754c27a0de538c6585f40d is 50, key is test_row_0/A:col10/1733883982522/Put/seqid=0 2024-12-11T02:26:22,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741932_1108 (size=12301) 2024-12-11T02:26:22,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/7874325c3b754c27a0de538c6585f40d 2024-12-11T02:26:22,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/50430165b1114cfd910e9e4b8b0db2c3 is 50, key is test_row_0/B:col10/1733883982522/Put/seqid=0 2024-12-11T02:26:22,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741933_1109 (size=12301) 2024-12-11T02:26:22,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/50430165b1114cfd910e9e4b8b0db2c3 2024-12-11T02:26:22,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/050df7010743427f864e313b32ce83ee is 50, key is test_row_0/C:col10/1733883982522/Put/seqid=0 2024-12-11T02:26:22,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,642 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884042640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:22,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:22,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741934_1110 (size=12301) 2024-12-11T02:26:22,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/050df7010743427f864e313b32ce83ee 2024-12-11T02:26:22,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/7874325c3b754c27a0de538c6585f40d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7874325c3b754c27a0de538c6585f40d 2024-12-11T02:26:22,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7874325c3b754c27a0de538c6585f40d, entries=150, sequenceid=460, filesize=12.0 K 2024-12-11T02:26:22,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/50430165b1114cfd910e9e4b8b0db2c3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/50430165b1114cfd910e9e4b8b0db2c3 2024-12-11T02:26:22,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/50430165b1114cfd910e9e4b8b0db2c3, entries=150, sequenceid=460, filesize=12.0 K 2024-12-11T02:26:22,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/050df7010743427f864e313b32ce83ee as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/050df7010743427f864e313b32ce83ee 2024-12-11T02:26:22,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/050df7010743427f864e313b32ce83ee, entries=150, sequenceid=460, filesize=12.0 K 2024-12-11T02:26:22,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 422539d3733f091ff661b5e7e0fc5956 in 210ms, sequenceid=460, compaction requested=true 2024-12-11T02:26:22,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:22,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:22,736 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:22,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:22,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:22,736 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:22,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:22,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:22,738 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:22,738 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
3 files of size 40331 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:22,738 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:22,738 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:22,738 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,738 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,738 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb7ce8c6ad7b46a3a6e9cbca4252a455, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/00ce75f7f8fb4186845ae4754dccbe46, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/50430165b1114cfd910e9e4b8b0db2c3] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=37.0 K 2024-12-11T02:26:22,738 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/26653e8f2e9e4b6a8990a934839d7b76, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/761e117a9ec0408c83e1b2d6bfbfce7e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7874325c3b754c27a0de538c6585f40d] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=39.4 K 2024-12-11T02:26:22,739 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26653e8f2e9e4b6a8990a934839d7b76, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733883981515 2024-12-11T02:26:22,739 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting cb7ce8c6ad7b46a3a6e9cbca4252a455, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733883981515 2024-12-11T02:26:22,739 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 761e117a9ec0408c83e1b2d6bfbfce7e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1733883982201 2024-12-11T02:26:22,740 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 00ce75f7f8fb4186845ae4754dccbe46, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1733883982201 2024-12-11T02:26:22,740 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7874325c3b754c27a0de538c6585f40d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733883982396 2024-12-11T02:26:22,740 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 50430165b1114cfd910e9e4b8b0db2c3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733883982396 2024-12-11T02:26:22,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:22,758 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:22,759 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/c37b6a11acf146fd9bc4605173b74592 is 50, key is test_row_0/B:col10/1733883982522/Put/seqid=0 2024-12-11T02:26:22,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-11T02:26:22,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:22,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:22,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:22,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:22,765 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#97 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:22,766 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/459df187d0ba4ec9a7de492853092ae5 is 50, key is test_row_0/A:col10/1733883982522/Put/seqid=0 2024-12-11T02:26:22,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d1073e5200514bea8ed68787adbba8de is 50, key is test_row_0/A:col10/1733883982626/Put/seqid=0 2024-12-11T02:26:22,795 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:22,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:22,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
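The repeated pid=21 entries above are a remotely dispatched FlushRegionCallable being declined with "NOT flushing ... as already flushing", reported back to the master as "Unable to complete flush", and re-executed on the region server. A minimal sketch of how such a table flush is requested through the public client API, assuming only the standard Admin interface (the procedure in this log was driven by the test and master, not by code like this):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the cluster to flush every region of the table; if a region is already
                // flushing (as in the pid=21 entries above), the request cannot complete immediately
                // and the underlying procedure is retried.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }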
2024-12-11T02:26:22,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884042818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741935_1111 (size=13391) 2024-12-11T02:26:22,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884042821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884042821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884042822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884042823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,835 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/c37b6a11acf146fd9bc4605173b74592 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/c37b6a11acf146fd9bc4605173b74592 2024-12-11T02:26:22,842 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into c37b6a11acf146fd9bc4605173b74592(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
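The Mutate RPCs being rejected above, together with flushed cells named like test_row_0/A:col10, correspond to ordinary client puts against the three column families A, B and C of TestAcidGuarantees. A minimal sketch of such a write using the standard client API; the value bytes are made up, and this is not the actual load generator used by the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAllFamiliesExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                // One cell per family, mirroring the A/B/C stores and the col10 qualifier seen in the flush lines.
                for (String family : new String[] {"A", "B", "C"}) {
                    put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                }
                table.put(put); // can fail while the region's memstore is over its blocking limit
            }
        }
    }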
2024-12-11T02:26:22,842 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,842 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=13, startTime=1733883982736; duration=0sec 2024-12-11T02:26:22,842 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:22,842 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:22,842 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:22,843 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:22,843 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:22,844 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,844 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6bbf63e671214843b3e5d011e6b87b42, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/436c83b913c4467ca616baa1308deed6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/050df7010743427f864e313b32ce83ee] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=37.0 K 2024-12-11T02:26:22,845 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bbf63e671214843b3e5d011e6b87b42, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1733883981515 2024-12-11T02:26:22,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741936_1112 (size=13391) 2024-12-11T02:26:22,846 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 436c83b913c4467ca616baa1308deed6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1733883982201 2024-12-11T02:26:22,846 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 050df7010743427f864e313b32ce83ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=460, earliestPutTs=1733883982396 2024-12-11T02:26:22,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741937_1113 (size=14741) 2024-12-11T02:26:22,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d1073e5200514bea8ed68787adbba8de 2024-12-11T02:26:22,862 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#99 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:22,863 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/45b0b6ca6c53452997bc1ad39ca2884f is 50, key is test_row_0/C:col10/1733883982522/Put/seqid=0 2024-12-11T02:26:22,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/46a32f20c2c84227a6d682ba6c46c0de is 50, key is test_row_0/B:col10/1733883982626/Put/seqid=0 2024-12-11T02:26:22,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741938_1114 (size=13391) 2024-12-11T02:26:22,917 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/45b0b6ca6c53452997bc1ad39ca2884f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/45b0b6ca6c53452997bc1ad39ca2884f 2024-12-11T02:26:22,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741939_1115 (size=12301) 2024-12-11T02:26:22,926 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/46a32f20c2c84227a6d682ba6c46c0de 2024-12-11T02:26:22,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884042927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884042932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884042933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,936 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 45b0b6ca6c53452997bc1ad39ca2884f(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:22,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:22,937 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:22,937 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883982736; duration=0sec 2024-12-11T02:26:22,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884042933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,937 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:22,938 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:22,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/280cb76f67eb4e4786de7eba3b837e6e is 50, key is test_row_0/C:col10/1733883982626/Put/seqid=0 2024-12-11T02:26:22,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:22,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:22,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:22,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:22,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
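[editor's note] The pid=21 entries that follow record a master-dispatched flush procedure (FlushRegionCallable) that fails with "Unable to complete flush" because the region is already flushing, after which the master simply re-dispatches it. A minimal sketch, assuming the procedure originated from an ordinary Admin flush request (the test's actual trigger is not visible in this log):

// Hypothetical sketch: requesting a table flush through the public Admin API. The master turns
// this into a flush procedure and sends FlushRegionCallable to the region server; if the region
// is already flushing, the callable fails and the master retries it, as the repeated
// ERROR/DEBUG pid=21 entries show.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequest {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush request; the underlying procedure may be retried by the master
      // until the region server can actually run it.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}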
2024-12-11T02:26:22,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:22,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741940_1116 (size=12301) 2024-12-11T02:26:22,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:23,103 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:23,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:23,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:23,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:23,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:23,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:23,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:23,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884043134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884043136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884043137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884043139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T02:26:23,255 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/459df187d0ba4ec9a7de492853092ae5 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/459df187d0ba4ec9a7de492853092ae5 2024-12-11T02:26:23,257 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:23,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:23,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:23,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:23,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:23,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:23,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:23,265 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 459df187d0ba4ec9a7de492853092ae5(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
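[editor's note] For context on the recurring "Over memstore limit=512.0 K" message: a region starts rejecting writes once its memstore exceeds the blocking size, which is the configured flush size multiplied by the block multiplier. The sketch below only illustrates that arithmetic; the concrete values are assumptions chosen to reproduce the 512 K figure seen in this log, not settings read from the test.

// Hypothetical sketch: how the blocking memstore size is derived from configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // flush at 128 KB (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x flush size

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024);
    long blockingSize = flushSize * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 512 KB, matching the RegionTooBusyException message in this log.
    System.out.println("blocking memstore size = " + (blockingSize / 1024.0) + " K");
  }
}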
2024-12-11T02:26:23,265 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:23,265 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=13, startTime=1733883982736; duration=0sec 2024-12-11T02:26:23,265 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:23,265 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:23,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/280cb76f67eb4e4786de7eba3b837e6e 2024-12-11T02:26:23,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d1073e5200514bea8ed68787adbba8de as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d1073e5200514bea8ed68787adbba8de 2024-12-11T02:26:23,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d1073e5200514bea8ed68787adbba8de, entries=200, sequenceid=479, filesize=14.4 K 2024-12-11T02:26:23,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/46a32f20c2c84227a6d682ba6c46c0de as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/46a32f20c2c84227a6d682ba6c46c0de 2024-12-11T02:26:23,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/46a32f20c2c84227a6d682ba6c46c0de, entries=150, sequenceid=479, filesize=12.0 K 2024-12-11T02:26:23,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/280cb76f67eb4e4786de7eba3b837e6e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/280cb76f67eb4e4786de7eba3b837e6e 2024-12-11T02:26:23,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/280cb76f67eb4e4786de7eba3b837e6e, entries=150, sequenceid=479, filesize=12.0 K 2024-12-11T02:26:23,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 422539d3733f091ff661b5e7e0fc5956 in 640ms, sequenceid=479, compaction requested=false 2024-12-11T02:26:23,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:23,412 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T02:26:23,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:23,413 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T02:26:23,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:23,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:23,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:23,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:23,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:23,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:23,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/4a6969b91372408bbfa4571e35498f57 is 50, key is test_row_0/A:col10/1733883982809/Put/seqid=0 2024-12-11T02:26:23,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:23,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:23,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884043456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884043458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884043459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884043460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741941_1117 (size=12301) 2024-12-11T02:26:23,471 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/4a6969b91372408bbfa4571e35498f57 2024-12-11T02:26:23,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/8749e95ed05a4902a5b4c4854e6ef724 is 50, key is test_row_0/B:col10/1733883982809/Put/seqid=0 2024-12-11T02:26:23,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741942_1118 (size=12301) 2024-12-11T02:26:23,512 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/8749e95ed05a4902a5b4c4854e6ef724 2024-12-11T02:26:23,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/08cc35c32afc4c01b73b710e62ab9ba5 is 50, key is test_row_0/C:col10/1733883982809/Put/seqid=0 2024-12-11T02:26:23,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884043562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884043562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884043565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884043565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741943_1119 (size=12301) 2024-12-11T02:26:23,585 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/08cc35c32afc4c01b73b710e62ab9ba5 2024-12-11T02:26:23,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/4a6969b91372408bbfa4571e35498f57 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4a6969b91372408bbfa4571e35498f57 2024-12-11T02:26:23,601 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4a6969b91372408bbfa4571e35498f57, entries=150, sequenceid=500, filesize=12.0 K 2024-12-11T02:26:23,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/8749e95ed05a4902a5b4c4854e6ef724 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8749e95ed05a4902a5b4c4854e6ef724 2024-12-11T02:26:23,610 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 
{event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8749e95ed05a4902a5b4c4854e6ef724, entries=150, sequenceid=500, filesize=12.0 K 2024-12-11T02:26:23,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/08cc35c32afc4c01b73b710e62ab9ba5 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/08cc35c32afc4c01b73b710e62ab9ba5 2024-12-11T02:26:23,619 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/08cc35c32afc4c01b73b710e62ab9ba5, entries=150, sequenceid=500, filesize=12.0 K 2024-12-11T02:26:23,621 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 422539d3733f091ff661b5e7e0fc5956 in 208ms, sequenceid=500, compaction requested=true 2024-12-11T02:26:23,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:23,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:23,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-11T02:26:23,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-11T02:26:23,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-11T02:26:23,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5450 sec 2024-12-11T02:26:23,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 2.5540 sec 2024-12-11T02:26:23,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:23,770 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-11T02:26:23,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:23,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:23,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:23,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:23,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:23,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:23,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/c48e92fcc9f24a6288149846225df8a9 is 50, key is test_row_0/A:col10/1733883983769/Put/seqid=0 2024-12-11T02:26:23,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741944_1120 (size=17181) 2024-12-11T02:26:23,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884043781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884043782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884043784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884043784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884043886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884043887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884043887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:23,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:23,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884043889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884044092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884044092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884044093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884044094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/c48e92fcc9f24a6288149846225df8a9 2024-12-11T02:26:24,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ed56ff45653c40dfba24c5e9fe0f84cc is 50, key is test_row_0/B:col10/1733883983769/Put/seqid=0 2024-12-11T02:26:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741945_1121 (size=12301) 2024-12-11T02:26:24,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884044396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884044396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884044401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884044401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,613 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ed56ff45653c40dfba24c5e9fe0f84cc 2024-12-11T02:26:24,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/438a423e9f444a588251ef75a0a890d8 is 50, key is test_row_0/C:col10/1733883983769/Put/seqid=0 2024-12-11T02:26:24,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741946_1122 (size=12301) 2024-12-11T02:26:24,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/438a423e9f444a588251ef75a0a890d8 2024-12-11T02:26:24,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/c48e92fcc9f24a6288149846225df8a9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c48e92fcc9f24a6288149846225df8a9 2024-12-11T02:26:24,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c48e92fcc9f24a6288149846225df8a9, entries=250, sequenceid=521, filesize=16.8 K 2024-12-11T02:26:24,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ed56ff45653c40dfba24c5e9fe0f84cc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ed56ff45653c40dfba24c5e9fe0f84cc 2024-12-11T02:26:24,694 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ed56ff45653c40dfba24c5e9fe0f84cc, entries=150, sequenceid=521, filesize=12.0 K 2024-12-11T02:26:24,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/438a423e9f444a588251ef75a0a890d8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/438a423e9f444a588251ef75a0a890d8 2024-12-11T02:26:24,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/438a423e9f444a588251ef75a0a890d8, entries=150, sequenceid=521, filesize=12.0 K 2024-12-11T02:26:24,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 422539d3733f091ff661b5e7e0fc5956 in 935ms, sequenceid=521, compaction requested=true 2024-12-11T02:26:24,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:24,706 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:24,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:24,707 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57614 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:24,708 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:24,708 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:24,708 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/459df187d0ba4ec9a7de492853092ae5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d1073e5200514bea8ed68787adbba8de, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4a6969b91372408bbfa4571e35498f57, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c48e92fcc9f24a6288149846225df8a9] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=56.3 K 2024-12-11T02:26:24,708 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 459df187d0ba4ec9a7de492853092ae5, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733883982396 2024-12-11T02:26:24,709 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1073e5200514bea8ed68787adbba8de, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1733883982626 2024-12-11T02:26:24,709 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a6969b91372408bbfa4571e35498f57, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1733883982809 2024-12-11T02:26:24,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:24,710 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:24,710 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c48e92fcc9f24a6288149846225df8a9, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1733883983445 2024-12-11T02:26:24,713 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50294 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:24,713 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:24,713 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:24,713 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/c37b6a11acf146fd9bc4605173b74592, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/46a32f20c2c84227a6d682ba6c46c0de, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8749e95ed05a4902a5b4c4854e6ef724, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ed56ff45653c40dfba24c5e9fe0f84cc] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=49.1 K 2024-12-11T02:26:24,714 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting c37b6a11acf146fd9bc4605173b74592, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733883982396 2024-12-11T02:26:24,715 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 46a32f20c2c84227a6d682ba6c46c0de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1733883982626 2024-12-11T02:26:24,715 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 8749e95ed05a4902a5b4c4854e6ef724, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1733883982809 2024-12-11T02:26:24,716 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ed56ff45653c40dfba24c5e9fe0f84cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1733883983766 2024-12-11T02:26:24,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:24,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:24,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:24,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:24,730 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#108 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:24,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/45da21b47870457e84cb3a5a425939d8 is 50, key is test_row_0/A:col10/1733883983769/Put/seqid=0 2024-12-11T02:26:24,733 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#109 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:24,733 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/eaec0ce6246048a98dcf8a10fd52921c is 50, key is test_row_0/B:col10/1733883983769/Put/seqid=0 2024-12-11T02:26:24,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741948_1124 (size=13527) 2024-12-11T02:26:24,813 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/eaec0ce6246048a98dcf8a10fd52921c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/eaec0ce6246048a98dcf8a10fd52921c 2024-12-11T02:26:24,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741947_1123 (size=13527) 2024-12-11T02:26:24,822 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into eaec0ce6246048a98dcf8a10fd52921c(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:24,822 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:24,822 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=12, startTime=1733883984710; duration=0sec 2024-12-11T02:26:24,822 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:24,822 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:24,823 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:24,823 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/45da21b47870457e84cb3a5a425939d8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/45da21b47870457e84cb3a5a425939d8 2024-12-11T02:26:24,826 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50294 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:24,826 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:24,826 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:24,827 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/45b0b6ca6c53452997bc1ad39ca2884f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/280cb76f67eb4e4786de7eba3b837e6e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/08cc35c32afc4c01b73b710e62ab9ba5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/438a423e9f444a588251ef75a0a890d8] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=49.1 K 2024-12-11T02:26:24,828 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 45b0b6ca6c53452997bc1ad39ca2884f, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733883982396 2024-12-11T02:26:24,829 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 280cb76f67eb4e4786de7eba3b837e6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1733883982626 2024-12-11T02:26:24,829 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 08cc35c32afc4c01b73b710e62ab9ba5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1733883982809 2024-12-11T02:26:24,830 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 438a423e9f444a588251ef75a0a890d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1733883983766 2024-12-11T02:26:24,831 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 45da21b47870457e84cb3a5a425939d8(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
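
The SortedCompactionPolicy/ExploringCompactionPolicy entries above show a minor compaction being selected from 4 eligible store files against a blocking threshold of 16. A rough sketch of the knobs that drive that selection, assuming the commonly documented property names (not verified against this exact 2.7.0-SNAPSHOT build):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfig {
  public static Configuration withSelectionLimits() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum and maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 4);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes to a store are throttled once it accumulates this many files
    // (the "16 blocking" figure reported by SortedCompactionPolicy above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}
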
2024-12-11T02:26:24,831 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:24,831 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=12, startTime=1733883984705; duration=0sec 2024-12-11T02:26:24,832 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:24,832 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:24,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T02:26:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:24,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:24,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:24,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:24,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:24,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:24,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:24,848 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#110 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:24,849 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/2f209482e36e40edb06ebb33551fe709 is 50, key is test_row_0/C:col10/1733883983769/Put/seqid=0 2024-12-11T02:26:24,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d93f5404afc04f65bf02a385039aaab9 is 50, key is test_row_0/A:col10/1733883983777/Put/seqid=0 2024-12-11T02:26:24,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741949_1125 (size=13527) 2024-12-11T02:26:24,898 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/2f209482e36e40edb06ebb33551fe709 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2f209482e36e40edb06ebb33551fe709 2024-12-11T02:26:24,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884044905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884044906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884044908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,912 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 2f209482e36e40edb06ebb33551fe709(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:24,913 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:24,913 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=12, startTime=1733883984723; duration=0sec 2024-12-11T02:26:24,913 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:24,913 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:24,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:24,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884044908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884044909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:24,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741950_1126 (size=14741) 2024-12-11T02:26:24,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=539 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d93f5404afc04f65bf02a385039aaab9 2024-12-11T02:26:24,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ef67560add9a4b2088e32709cb122623 is 50, key is test_row_0/B:col10/1733883983777/Put/seqid=0 2024-12-11T02:26:24,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741951_1127 (size=12301) 2024-12-11T02:26:25,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884045014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884045015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884045015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884045015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T02:26:25,186 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-11T02:26:25,187 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:25,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-11T02:26:25,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T02:26:25,189 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:25,193 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:25,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:25,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884045217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884045219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884045219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884045220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T02:26:25,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-11T02:26:25,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:25,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:25,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:25,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
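
The repeated RegionTooBusyException entries show mutations being rejected once the region's memstore passes its blocking limit (512.0 K here, normally the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier); the client is expected to back off and retry while the in-flight flush drains the memstore. A minimal client-side sketch under those assumptions, with the table, row, family, and qualifier names taken from the log and the retry settings chosen only for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressurePut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Retry count and pause govern how long the client keeps retrying a
    // transient RegionTooBusyException before surfacing it to the caller.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // The client retries the rejected mutation internally; put() only throws
      // once the configured retries are exhausted.
      table.put(put);
    }
  }
}
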
2024-12-11T02:26:25,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:25,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:25,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=539 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ef67560add9a4b2088e32709cb122623 2024-12-11T02:26:25,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/a8317d6036b4410c8ae41b8d92ca0289 is 50, key is test_row_0/C:col10/1733883983777/Put/seqid=0 2024-12-11T02:26:25,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741952_1128 (size=12301) 2024-12-11T02:26:25,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=539 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/a8317d6036b4410c8ae41b8d92ca0289 2024-12-11T02:26:25,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d93f5404afc04f65bf02a385039aaab9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d93f5404afc04f65bf02a385039aaab9 2024-12-11T02:26:25,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d93f5404afc04f65bf02a385039aaab9, entries=200, sequenceid=539, filesize=14.4 K 2024-12-11T02:26:25,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/ef67560add9a4b2088e32709cb122623 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ef67560add9a4b2088e32709cb122623 2024-12-11T02:26:25,439 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ef67560add9a4b2088e32709cb122623, entries=150, sequenceid=539, filesize=12.0 K 2024-12-11T02:26:25,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/a8317d6036b4410c8ae41b8d92ca0289 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a8317d6036b4410c8ae41b8d92ca0289 2024-12-11T02:26:25,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a8317d6036b4410c8ae41b8d92ca0289, entries=150, sequenceid=539, filesize=12.0 K 2024-12-11T02:26:25,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 422539d3733f091ff661b5e7e0fc5956 in 602ms, sequenceid=539, compaction requested=false 2024-12-11T02:26:25,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:25,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T02:26:25,499 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-11T02:26:25,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:25,500 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-11T02:26:25,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:25,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:25,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:25,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:25,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:25,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:25,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/4e8900f8ad95411eb9b7440f29948e12 is 50, key is test_row_0/A:col10/1733883984906/Put/seqid=0 2024-12-11T02:26:25,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:25,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:25,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741953_1129 (size=12301) 2024-12-11T02:26:25,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884045534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884045537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884045537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884045537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884045639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884045642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884045644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884045644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T02:26:25,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884045844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884045845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884045846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884045846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,926 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=560 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/4e8900f8ad95411eb9b7440f29948e12 2024-12-11T02:26:25,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:25,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884045926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:25,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b541dbeec4344800826afbd7ca6f4e39 is 50, key is test_row_0/B:col10/1733883984906/Put/seqid=0 2024-12-11T02:26:25,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741954_1130 (size=12301) 2024-12-11T02:26:25,979 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=560 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b541dbeec4344800826afbd7ca6f4e39 2024-12-11T02:26:25,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/799a59b149324ac69e7ca49b5d9fc2bf is 50, key is test_row_0/C:col10/1733883984906/Put/seqid=0 2024-12-11T02:26:26,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741955_1131 (size=12301) 2024-12-11T02:26:26,003 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=560 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/799a59b149324ac69e7ca49b5d9fc2bf 2024-12-11T02:26:26,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/4e8900f8ad95411eb9b7440f29948e12 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4e8900f8ad95411eb9b7440f29948e12 
2024-12-11T02:26:26,018 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4e8900f8ad95411eb9b7440f29948e12, entries=150, sequenceid=560, filesize=12.0 K 2024-12-11T02:26:26,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b541dbeec4344800826afbd7ca6f4e39 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b541dbeec4344800826afbd7ca6f4e39 2024-12-11T02:26:26,030 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b541dbeec4344800826afbd7ca6f4e39, entries=150, sequenceid=560, filesize=12.0 K 2024-12-11T02:26:26,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/799a59b149324ac69e7ca49b5d9fc2bf as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/799a59b149324ac69e7ca49b5d9fc2bf 2024-12-11T02:26:26,037 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/799a59b149324ac69e7ca49b5d9fc2bf, entries=150, sequenceid=560, filesize=12.0 K 2024-12-11T02:26:26,038 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 422539d3733f091ff661b5e7e0fc5956 in 538ms, sequenceid=560, compaction requested=true 2024-12-11T02:26:26,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:26,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:26,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-11T02:26:26,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-11T02:26:26,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-11T02:26:26,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 846 msec 2024-12-11T02:26:26,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 855 msec 2024-12-11T02:26:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:26,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T02:26:26,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:26,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:26,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:26,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:26,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:26,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:26,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d2ece349cfaa46a3985c29076a2ebbec is 50, key is test_row_0/A:col10/1733883985535/Put/seqid=0 2024-12-11T02:26:26,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884046173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884046173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884046173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884046174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741956_1132 (size=12301) 2024-12-11T02:26:26,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=578 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d2ece349cfaa46a3985c29076a2ebbec 2024-12-11T02:26:26,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/4645fb24ea2e479c9de5a0b61d7ba50b is 50, key is test_row_0/B:col10/1733883985535/Put/seqid=0 2024-12-11T02:26:26,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741957_1133 (size=12301) 2024-12-11T02:26:26,210 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=578 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/4645fb24ea2e479c9de5a0b61d7ba50b 2024-12-11T02:26:26,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/00993a93fd2a4b768b1c6becf3a569d4 is 50, key is test_row_0/C:col10/1733883985535/Put/seqid=0 2024-12-11T02:26:26,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741958_1134 (size=12301) 2024-12-11T02:26:26,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=578 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/00993a93fd2a4b768b1c6becf3a569d4 2024-12-11T02:26:26,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/d2ece349cfaa46a3985c29076a2ebbec as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d2ece349cfaa46a3985c29076a2ebbec 2024-12-11T02:26:26,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d2ece349cfaa46a3985c29076a2ebbec, entries=150, sequenceid=578, filesize=12.0 K 2024-12-11T02:26:26,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/4645fb24ea2e479c9de5a0b61d7ba50b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4645fb24ea2e479c9de5a0b61d7ba50b 2024-12-11T02:26:26,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4645fb24ea2e479c9de5a0b61d7ba50b, entries=150, sequenceid=578, filesize=12.0 K 2024-12-11T02:26:26,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/00993a93fd2a4b768b1c6becf3a569d4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/00993a93fd2a4b768b1c6becf3a569d4 2024-12-11T02:26:26,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 300 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884046278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/00993a93fd2a4b768b1c6becf3a569d4, entries=150, sequenceid=578, filesize=12.0 K 2024-12-11T02:26:26,284 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 422539d3733f091ff661b5e7e0fc5956 in 133ms, sequenceid=578, compaction requested=true 2024-12-11T02:26:26,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:26,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:26,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:26,284 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:26,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:26,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:26,284 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:26,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:26,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:26,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 
422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:26,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-11T02:26:26,286 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52870 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:26,286 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:26,287 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:26,287 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/45da21b47870457e84cb3a5a425939d8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d93f5404afc04f65bf02a385039aaab9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4e8900f8ad95411eb9b7440f29948e12, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d2ece349cfaa46a3985c29076a2ebbec] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=51.6 K 2024-12-11T02:26:26,288 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45da21b47870457e84cb3a5a425939d8, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1733883983766 2024-12-11T02:26:26,289 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50430 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:26,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:26,289 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:26,289 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:26,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:26,289 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/eaec0ce6246048a98dcf8a10fd52921c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ef67560add9a4b2088e32709cb122623, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b541dbeec4344800826afbd7ca6f4e39, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4645fb24ea2e479c9de5a0b61d7ba50b] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=49.2 K 2024-12-11T02:26:26,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:26,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:26,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:26,289 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d93f5404afc04f65bf02a385039aaab9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=539, earliestPutTs=1733883983777 2024-12-11T02:26:26,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:26,290 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting eaec0ce6246048a98dcf8a10fd52921c, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1733883983766 2024-12-11T02:26:26,290 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e8900f8ad95411eb9b7440f29948e12, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=560, earliestPutTs=1733883984902 2024-12-11T02:26:26,290 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ef67560add9a4b2088e32709cb122623, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=539, earliestPutTs=1733883983777 2024-12-11T02:26:26,291 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2ece349cfaa46a3985c29076a2ebbec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=578, earliestPutTs=1733883985531 2024-12-11T02:26:26,291 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b541dbeec4344800826afbd7ca6f4e39, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=560, earliestPutTs=1733883984902 2024-12-11T02:26:26,292 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 4645fb24ea2e479c9de5a0b61d7ba50b, keycount=150, bloomtype=ROW, 
size=12.0 K, encoding=NONE, compression=NONE, seqNum=578, earliestPutTs=1733883985531 2024-12-11T02:26:26,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T02:26:26,293 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-11T02:26:26,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:26,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-11T02:26:26,297 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:26,298 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:26,298 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:26,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T02:26:26,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/89f09c4b7a36467494c7615a29a84853 is 50, key is test_row_0/A:col10/1733883986285/Put/seqid=0 2024-12-11T02:26:26,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884046316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884046318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884046319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,329 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#121 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:26,330 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#122 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:26,330 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/bb8b2aa269cd44f08e12534bedb010cb is 50, key is test_row_0/A:col10/1733883985535/Put/seqid=0 2024-12-11T02:26:26,331 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b95a17ee4e344d7eb999ba92cce1971d is 50, key is test_row_0/B:col10/1733883985535/Put/seqid=0 2024-12-11T02:26:26,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741960_1136 (size=13663) 2024-12-11T02:26:26,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741961_1137 (size=13663) 2024-12-11T02:26:26,368 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/bb8b2aa269cd44f08e12534bedb010cb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/bb8b2aa269cd44f08e12534bedb010cb 2024-12-11T02:26:26,374 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/b95a17ee4e344d7eb999ba92cce1971d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b95a17ee4e344d7eb999ba92cce1971d 2024-12-11T02:26:26,389 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into bb8b2aa269cd44f08e12534bedb010cb(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:26,390 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:26,390 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=12, startTime=1733883986284; duration=0sec 2024-12-11T02:26:26,390 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:26,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741959_1135 (size=12301) 2024-12-11T02:26:26,390 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:26,390 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:26,391 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into b95a17ee4e344d7eb999ba92cce1971d(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:26,391 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:26,391 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=12, startTime=1733883986284; duration=0sec 2024-12-11T02:26:26,391 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:26,391 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:26,395 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50430 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:26,395 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:26,395 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:26,396 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2f209482e36e40edb06ebb33551fe709, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a8317d6036b4410c8ae41b8d92ca0289, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/799a59b149324ac69e7ca49b5d9fc2bf, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/00993a93fd2a4b768b1c6becf3a569d4] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=49.2 K 2024-12-11T02:26:26,396 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f209482e36e40edb06ebb33551fe709, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=521, earliestPutTs=1733883983766 2024-12-11T02:26:26,397 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8317d6036b4410c8ae41b8d92ca0289, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=539, earliestPutTs=1733883983777 2024-12-11T02:26:26,397 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 799a59b149324ac69e7ca49b5d9fc2bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=560, earliestPutTs=1733883984902 2024-12-11T02:26:26,398 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00993a93fd2a4b768b1c6becf3a569d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=578, earliestPutTs=1733883985531 2024-12-11T02:26:26,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T02:26:26,411 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#123 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:26,411 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/2a9a3366b226420f9ead1dde6b932084 is 50, key is test_row_0/C:col10/1733883985535/Put/seqid=0 2024-12-11T02:26:26,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884046424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884046427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884046428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,451 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T02:26:26,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:26,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:26,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:26,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741962_1138 (size=13663) 2024-12-11T02:26:26,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,464 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/2a9a3366b226420f9ead1dde6b932084 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2a9a3366b226420f9ead1dde6b932084 2024-12-11T02:26:26,476 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 2a9a3366b226420f9ead1dde6b932084(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:26,476 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:26,476 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=12, startTime=1733883986284; duration=0sec 2024-12-11T02:26:26,476 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:26,477 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:26,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884046482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T02:26:26,605 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T02:26:26,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:26,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:26,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:26,606 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884046631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884046635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884046636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,758 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T02:26:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:26,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884046786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=599 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/89f09c4b7a36467494c7615a29a84853 2024-12-11T02:26:26,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/3d00bfa3d01c464caf3dee2e0308500e is 50, key is test_row_0/B:col10/1733883986285/Put/seqid=0 2024-12-11T02:26:26,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741963_1139 (size=12301) 2024-12-11T02:26:26,901 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T02:26:26,913 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T02:26:26,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:26,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:26,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:26,916 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:26,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884046936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884046939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:26,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:26,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884046940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,069 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T02:26:27,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:27,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:27,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:27,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=599 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/3d00bfa3d01c464caf3dee2e0308500e 2024-12-11T02:26:27,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/a90a370a81494047a003d7e3f574a92a is 50, key is test_row_0/C:col10/1733883986285/Put/seqid=0 2024-12-11T02:26:27,222 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T02:26:27,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741964_1140 (size=12301) 2024-12-11T02:26:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:27,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:27,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884047292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,376 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T02:26:27,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:27,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
as already flushing 2024-12-11T02:26:27,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:27,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T02:26:27,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:27,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884047440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:27,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884047445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:27,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884047447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,529 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T02:26:27,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:27,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:27,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:27,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:27,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=599 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/a90a370a81494047a003d7e3f574a92a 2024-12-11T02:26:27,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/89f09c4b7a36467494c7615a29a84853 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/89f09c4b7a36467494c7615a29a84853 2024-12-11T02:26:27,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/89f09c4b7a36467494c7615a29a84853, entries=150, sequenceid=599, filesize=12.0 K 2024-12-11T02:26:27,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/3d00bfa3d01c464caf3dee2e0308500e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/3d00bfa3d01c464caf3dee2e0308500e 2024-12-11T02:26:27,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/3d00bfa3d01c464caf3dee2e0308500e, entries=150, sequenceid=599, filesize=12.0 K 2024-12-11T02:26:27,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/a90a370a81494047a003d7e3f574a92a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a90a370a81494047a003d7e3f574a92a 2024-12-11T02:26:27,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a90a370a81494047a003d7e3f574a92a, entries=150, sequenceid=599, filesize=12.0 K 2024-12-11T02:26:27,661 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 422539d3733f091ff661b5e7e0fc5956 in 1376ms, sequenceid=599, compaction requested=false 2024-12-11T02:26:27,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:27,683 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:27,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T02:26:27,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:27,685 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T02:26:27,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:27,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:27,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:27,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:27,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:27,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:27,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/3f17227cc3fc4d22bb840d1a1b705e8c is 50, key is test_row_0/A:col10/1733883986308/Put/seqid=0 2024-12-11T02:26:27,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741965_1141 (size=12301) 2024-12-11T02:26:27,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:27,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
as already flushing 2024-12-11T02:26:27,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:27,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884047997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:28,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:28,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884048100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:28,108 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=617 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/3f17227cc3fc4d22bb840d1a1b705e8c 2024-12-11T02:26:28,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/dca396eefb3946af922a5c0272d8130d is 50, key is test_row_0/B:col10/1733883986308/Put/seqid=0 2024-12-11T02:26:28,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741966_1142 (size=12301) 2024-12-11T02:26:28,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:28,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884048299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:28,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:28,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884048304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:28,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T02:26:28,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:28,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884048449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:28,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:28,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884048455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:28,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:28,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884048456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:28,539 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=617 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/dca396eefb3946af922a5c0272d8130d 2024-12-11T02:26:28,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/b92c590d94bf4c3d9d6b2d13cb1cd66f is 50, key is test_row_0/C:col10/1733883986308/Put/seqid=0 2024-12-11T02:26:28,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741967_1143 (size=12301) 2024-12-11T02:26:28,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:28,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884048607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:28,956 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=617 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/b92c590d94bf4c3d9d6b2d13cb1cd66f 2024-12-11T02:26:28,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/3f17227cc3fc4d22bb840d1a1b705e8c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/3f17227cc3fc4d22bb840d1a1b705e8c 2024-12-11T02:26:28,970 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/3f17227cc3fc4d22bb840d1a1b705e8c, entries=150, sequenceid=617, filesize=12.0 K 2024-12-11T02:26:28,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/dca396eefb3946af922a5c0272d8130d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/dca396eefb3946af922a5c0272d8130d 2024-12-11T02:26:28,978 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/dca396eefb3946af922a5c0272d8130d, entries=150, sequenceid=617, filesize=12.0 K 2024-12-11T02:26:28,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 
{event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/b92c590d94bf4c3d9d6b2d13cb1cd66f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b92c590d94bf4c3d9d6b2d13cb1cd66f 2024-12-11T02:26:28,984 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b92c590d94bf4c3d9d6b2d13cb1cd66f, entries=150, sequenceid=617, filesize=12.0 K 2024-12-11T02:26:28,985 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 422539d3733f091ff661b5e7e0fc5956 in 1301ms, sequenceid=617, compaction requested=true 2024-12-11T02:26:28,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:28,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:28,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-11T02:26:28,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-11T02:26:28,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-11T02:26:28,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6890 sec 2024-12-11T02:26:28,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 2.6960 sec 2024-12-11T02:26:29,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:29,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T02:26:29,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:29,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:29,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:29,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:29,116 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:29,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:29,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/0a63f8002e7d40e79fe59def7f0e546f is 50, key is test_row_0/A:col10/1733883989114/Put/seqid=0 2024-12-11T02:26:29,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741968_1144 (size=14741) 2024-12-11T02:26:29,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:29,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884049147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:29,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:29,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884049250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:29,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:29,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884049455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:29,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=639 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/0a63f8002e7d40e79fe59def7f0e546f 2024-12-11T02:26:29,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/e6725f25cbde4a638c5975690c434407 is 50, key is test_row_0/B:col10/1733883989114/Put/seqid=0 2024-12-11T02:26:29,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741969_1145 (size=12301) 2024-12-11T02:26:29,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:29,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884049757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:29,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=639 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/e6725f25cbde4a638c5975690c434407 2024-12-11T02:26:29,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/93e26657e212403ea4b3fe009a4367f2 is 50, key is test_row_0/C:col10/1733883989114/Put/seqid=0 2024-12-11T02:26:29,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741970_1146 (size=12301) 2024-12-11T02:26:29,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=639 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/93e26657e212403ea4b3fe009a4367f2 2024-12-11T02:26:29,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/0a63f8002e7d40e79fe59def7f0e546f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0a63f8002e7d40e79fe59def7f0e546f 2024-12-11T02:26:29,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0a63f8002e7d40e79fe59def7f0e546f, entries=200, sequenceid=639, filesize=14.4 K 2024-12-11T02:26:29,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/e6725f25cbde4a638c5975690c434407 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/e6725f25cbde4a638c5975690c434407 2024-12-11T02:26:29,978 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/e6725f25cbde4a638c5975690c434407, entries=150, sequenceid=639, filesize=12.0 K 2024-12-11T02:26:29,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/93e26657e212403ea4b3fe009a4367f2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/93e26657e212403ea4b3fe009a4367f2 2024-12-11T02:26:29,985 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/93e26657e212403ea4b3fe009a4367f2, entries=150, sequenceid=639, filesize=12.0 K 2024-12-11T02:26:29,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 422539d3733f091ff661b5e7e0fc5956 in 871ms, sequenceid=639, compaction requested=true 2024-12-11T02:26:29,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:29,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:29,987 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:29,987 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:29,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:29,989 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50566 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:29,989 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 
422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:29,989 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:29,989 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b95a17ee4e344d7eb999ba92cce1971d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/3d00bfa3d01c464caf3dee2e0308500e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/dca396eefb3946af922a5c0272d8130d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/e6725f25cbde4a638c5975690c434407] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=49.4 K 2024-12-11T02:26:29,990 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53006 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:29,990 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b95a17ee4e344d7eb999ba92cce1971d, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=578, earliestPutTs=1733883985531 2024-12-11T02:26:29,990 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:29,990 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
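
The flush procedures completed earlier (pid=24/25) were driven by a client flush request, and the minor compactions of stores A and B above are selected server-side by ExploringCompactionPolicy once four eligible store files have accumulated. For reference, a minimal sketch of issuing the same flush and compaction requests through the public Admin API is shown below; the table and column-family names are taken from this log, while the connection setup and the polling loop are illustrative assumptions rather than part of the test tool.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees"); // table name from the log

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush every region of the table; this is the kind of call
          // that produces FlushTableProcedure / FlushRegionProcedure entries like pid=24/25.
          admin.flush(table);

          // Request a compaction of a single column family, e.g. store B as compacted above.
          admin.compact(table, Bytes.toBytes("B"));

          // Optionally wait until the region server reports no compaction in progress.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500); // illustrative polling interval
          }
        }
      }
    }
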
2024-12-11T02:26:29,990 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/bb8b2aa269cd44f08e12534bedb010cb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/89f09c4b7a36467494c7615a29a84853, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/3f17227cc3fc4d22bb840d1a1b705e8c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0a63f8002e7d40e79fe59def7f0e546f] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=51.8 K 2024-12-11T02:26:29,990 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb8b2aa269cd44f08e12534bedb010cb, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=578, earliestPutTs=1733883985531 2024-12-11T02:26:29,990 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d00bfa3d01c464caf3dee2e0308500e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=599, earliestPutTs=1733883986281 2024-12-11T02:26:29,992 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89f09c4b7a36467494c7615a29a84853, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=599, earliestPutTs=1733883986281 2024-12-11T02:26:29,992 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting dca396eefb3946af922a5c0272d8130d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=617, earliestPutTs=1733883986308 2024-12-11T02:26:29,992 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f17227cc3fc4d22bb840d1a1b705e8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=617, earliestPutTs=1733883986308 2024-12-11T02:26:29,992 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e6725f25cbde4a638c5975690c434407, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=639, earliestPutTs=1733883987991 2024-12-11T02:26:29,993 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a63f8002e7d40e79fe59def7f0e546f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=639, earliestPutTs=1733883987982 2024-12-11T02:26:30,010 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#132 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:30,010 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#133 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:30,011 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/8eacbf072d3c4e71a0b59f5d218fd8f3 is 50, key is test_row_0/A:col10/1733883989114/Put/seqid=0 2024-12-11T02:26:30,011 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/d66c6e1cd4f0458abd684c9273181d62 is 50, key is test_row_0/B:col10/1733883989114/Put/seqid=0 2024-12-11T02:26:30,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741971_1147 (size=13799) 2024-12-11T02:26:30,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741972_1148 (size=13799) 2024-12-11T02:26:30,043 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/8eacbf072d3c4e71a0b59f5d218fd8f3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8eacbf072d3c4e71a0b59f5d218fd8f3 2024-12-11T02:26:30,054 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into 8eacbf072d3c4e71a0b59f5d218fd8f3(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
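
The repeated RegionTooBusyException warnings throughout this run come from HRegion.checkResources, which rejects writes once the region's memstore exceeds its blocking limit (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the 512.0 K limit here suggests the test runs with a deliberately small flush size). The client's RpcRetryingCallerImpl retries those calls on its own, as the "tries=6, retries=16" entries later in the log show. A minimal writer-side sketch of the same back-off pattern is below; the table, family, and row names are taken from the log, and the retry counts, pause values, and backoff schedule are illustrative assumptions rather than the tool's actual behaviour.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Bound the client's own retry behaviour (the RpcRetryingCallerImpl seen in this log).
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base pause between retries, in ms

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1")); // row name from the log
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

          // Application-level backoff on top of the client's built-in retries, for callers
          // that want to keep trying after hbase.client.retries.number is exhausted.
          long backoffMs = 100;
          for (int attempt = 0; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              // Memstore back-pressure from the server; depending on client settings the
              // signal may instead arrive wrapped in a RetriesExhaustedException.
              if (attempt >= 5) {
                throw e; // illustrative cutoff
              }
              Thread.sleep(backoffMs);
              backoffMs *= 2; // exponential backoff while the flush drains the memstore
            }
          }
        }
      }
    }
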
2024-12-11T02:26:30,054 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:30,054 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=12, startTime=1733883989986; duration=0sec 2024-12-11T02:26:30,054 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:30,054 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:30,055 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:30,057 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50566 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:30,057 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:30,057 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:30,057 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2a9a3366b226420f9ead1dde6b932084, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a90a370a81494047a003d7e3f574a92a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b92c590d94bf4c3d9d6b2d13cb1cd66f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/93e26657e212403ea4b3fe009a4367f2] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=49.4 K 2024-12-11T02:26:30,057 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a9a3366b226420f9ead1dde6b932084, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=578, earliestPutTs=1733883985531 2024-12-11T02:26:30,058 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting a90a370a81494047a003d7e3f574a92a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=599, earliestPutTs=1733883986281 2024-12-11T02:26:30,058 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting b92c590d94bf4c3d9d6b2d13cb1cd66f, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=617, earliestPutTs=1733883986308 2024-12-11T02:26:30,059 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93e26657e212403ea4b3fe009a4367f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=639, earliestPutTs=1733883987991 2024-12-11T02:26:30,076 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#134 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:30,076 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/0ab0c276637547ff888f043a9dfc647f is 50, key is test_row_0/C:col10/1733883989114/Put/seqid=0 2024-12-11T02:26:30,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741973_1149 (size=13799) 2024-12-11T02:26:30,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:30,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T02:26:30,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:30,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:30,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:30,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:30,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:30,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:30,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/21b20d96c3ed49cbad2a23e8d8434b8b is 50, key is test_row_0/A:col10/1733883989137/Put/seqid=0 2024-12-11T02:26:30,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741974_1150 (size=14741) 2024-12-11T02:26:30,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=655 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/21b20d96c3ed49cbad2a23e8d8434b8b 2024-12-11T02:26:30,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/02efacd73fec413e987977e517c497b7 is 50, key is test_row_0/B:col10/1733883989137/Put/seqid=0 2024-12-11T02:26:30,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741975_1151 (size=12301) 2024-12-11T02:26:30,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=655 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/02efacd73fec413e987977e517c497b7 2024-12-11T02:26:30,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/d023494905374ac68866ea5ee9910e9f is 50, key is test_row_0/C:col10/1733883989137/Put/seqid=0 2024-12-11T02:26:30,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741976_1152 (size=12301) 2024-12-11T02:26:30,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=655 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/d023494905374ac68866ea5ee9910e9f 2024-12-11T02:26:30,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/21b20d96c3ed49cbad2a23e8d8434b8b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21b20d96c3ed49cbad2a23e8d8434b8b 2024-12-11T02:26:30,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21b20d96c3ed49cbad2a23e8d8434b8b, entries=200, sequenceid=655, filesize=14.4 K 2024-12-11T02:26:30,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/02efacd73fec413e987977e517c497b7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/02efacd73fec413e987977e517c497b7 2024-12-11T02:26:30,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/02efacd73fec413e987977e517c497b7, entries=150, sequenceid=655, filesize=12.0 K 2024-12-11T02:26:30,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/d023494905374ac68866ea5ee9910e9f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d023494905374ac68866ea5ee9910e9f 2024-12-11T02:26:30,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884050341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 316 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884050341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d023494905374ac68866ea5ee9910e9f, entries=150, sequenceid=655, filesize=12.0 K 2024-12-11T02:26:30,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 422539d3733f091ff661b5e7e0fc5956 in 85ms, sequenceid=655, compaction requested=false 2024-12-11T02:26:30,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T02:26:30,404 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-11T02:26:30,406 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-11T02:26:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T02:26:30,407 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:30,408 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:30,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:30,439 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/d66c6e1cd4f0458abd684c9273181d62 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d66c6e1cd4f0458abd684c9273181d62 2024-12-11T02:26:30,446 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into d66c6e1cd4f0458abd684c9273181d62(size=13.5 K), total size for store is 25.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:30,446 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:30,446 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=12, startTime=1733883989987; duration=0sec 2024-12-11T02:26:30,446 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:30,446 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:30,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:26:30,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:30,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:30,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:30,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:30,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:30,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:30,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/43451a20b5d341d085ad9f86e5f102ba is 50, key is test_row_0/A:col10/1733883990336/Put/seqid=0 2024-12-11T02:26:30,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741977_1153 (size=17181) 2024-12-11T02:26:30,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34718 deadline: 1733884050462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884050462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884050463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 323 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884050464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,466 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:26:30,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34732 deadline: 1733884050475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,476 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:26:30,497 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/0ab0c276637547ff888f043a9dfc647f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0ab0c276637547ff888f043a9dfc647f 2024-12-11T02:26:30,503 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 0ab0c276637547ff888f043a9dfc647f(size=13.5 K), total size for store is 25.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:30,504 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:30,504 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=12, startTime=1733883989987; duration=0sec 2024-12-11T02:26:30,504 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:30,504 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T02:26:30,560 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T02:26:30,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:30,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:30,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:30,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:30,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:30,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:30,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884050566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884050567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884050568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T02:26:30,713 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T02:26:30,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:30,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:30,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:30,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:30,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:30,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:30,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884050770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:30,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 327 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884050770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884050770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=680 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/43451a20b5d341d085ad9f86e5f102ba 2024-12-11T02:26:30,866 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:30,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T02:26:30,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:30,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:30,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:30,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:30,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:30,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:30,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/49480c14d1724475a5d9cb13b9d4999a is 50, key is test_row_0/B:col10/1733883990336/Put/seqid=0 2024-12-11T02:26:30,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741978_1154 (size=12301) 2024-12-11T02:26:30,888 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=680 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/49480c14d1724475a5d9cb13b9d4999a 2024-12-11T02:26:30,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/dadab8c48a4f477aac2b4a80c4a20684 is 50, key is test_row_0/C:col10/1733883990336/Put/seqid=0 2024-12-11T02:26:30,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741979_1155 (size=12301) 2024-12-11T02:26:31,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T02:26:31,020 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:31,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T02:26:31,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:31,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:31,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:31,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:31,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:31,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:31,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1733884051073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:31,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34748 deadline: 1733884051073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:31,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:31,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 329 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34772 deadline: 1733884051073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:31,173 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:31,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T02:26:31,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:31,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:31,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:31,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:31,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:31,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:31,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=680 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/dadab8c48a4f477aac2b4a80c4a20684 2024-12-11T02:26:31,328 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:31,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T02:26:31,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:31,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. as already flushing 2024-12-11T02:26:31,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:31,329 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
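
The RegionTooBusyException entries above are raised by HRegion.checkResources() once the region's memstore passes its blocking limit (512.0 K in this run, deliberately tiny for the acid-guarantees test), and they travel back to the writer over the RPC. The sketch below is not the test's own writer code; it is a minimal, hand-written illustration of how a plain HBase client could back off and retry on that exception, using the table, row, family and qualifier names that appear in this log. The standard client also performs its own internal retries, so this is only a simplified picture.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {            // illustrative class name, not from the test
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A" and qualifier "col10" mirror the cells seen elsewhere in this log.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                        // may fail while the memstore is over its limit
          break;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);               // give the flush a chance to drain the memstore
          backoffMs *= 2;                        // simple exponential backoff
        }
      }
    }
  }
}
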
2024-12-11T02:26:31,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:31,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:31,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/43451a20b5d341d085ad9f86e5f102ba as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/43451a20b5d341d085ad9f86e5f102ba 2024-12-11T02:26:31,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/43451a20b5d341d085ad9f86e5f102ba, entries=250, sequenceid=680, filesize=16.8 K 2024-12-11T02:26:31,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/49480c14d1724475a5d9cb13b9d4999a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/49480c14d1724475a5d9cb13b9d4999a 2024-12-11T02:26:31,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/49480c14d1724475a5d9cb13b9d4999a, entries=150, sequenceid=680, filesize=12.0 K 2024-12-11T02:26:31,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/dadab8c48a4f477aac2b4a80c4a20684 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/dadab8c48a4f477aac2b4a80c4a20684 2024-12-11T02:26:31,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/dadab8c48a4f477aac2b4a80c4a20684, entries=150, sequenceid=680, filesize=12.0 K 2024-12-11T02:26:31,360 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 422539d3733f091ff661b5e7e0fc5956 in 911ms, sequenceid=680, compaction requested=true 2024-12-11T02:26:31,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:31,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:31,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:31,360 DEBUG 
[RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:31,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:31,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:31,360 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:31,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 422539d3733f091ff661b5e7e0fc5956:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:31,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:31,361 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:31,361 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/A is initiating minor compaction (all files) 2024-12-11T02:26:31,361 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/A in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:31,361 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8eacbf072d3c4e71a0b59f5d218fd8f3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21b20d96c3ed49cbad2a23e8d8434b8b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/43451a20b5d341d085ad9f86e5f102ba] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=44.6 K 2024-12-11T02:26:31,362 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:31,362 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/B is initiating minor compaction (all files) 2024-12-11T02:26:31,362 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/B in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
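
The "selected 3 files of size 45721 ... with 1 in ratio" entries above come from the exploring compaction policy's size-ratio test: a candidate set is acceptable when no single file is larger than ratio times the combined size of the others (1.2 is the usual default for hbase.hstore.compaction.ratio). The snippet below is a simplified stand-in for that check, not the actual ExploringCompactionPolicy code, using the approximate A-store file sizes from this log (13.5 K, 14.4 K and 16.8 K, roughly 45721 bytes in total).

public class RatioCheckSketch {                  // illustrative only
  static boolean inRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) total += s;
    for (long s : sizes) {
      if (s > ratio * (total - s)) return false; // one file dominates the set -> reject
    }
    return true;
  }

  public static void main(String[] args) {
    long[] aStoreFiles = {13824, 14746, 17203};  // ~13.5 K, ~14.4 K, ~16.8 K (approximate)
    System.out.println(inRatio(aStoreFiles, 1.2)); // true: all 3 files are compacted together
  }
}
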
2024-12-11T02:26:31,362 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d66c6e1cd4f0458abd684c9273181d62, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/02efacd73fec413e987977e517c497b7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/49480c14d1724475a5d9cb13b9d4999a] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=37.5 K 2024-12-11T02:26:31,362 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8eacbf072d3c4e71a0b59f5d218fd8f3, keycount=150, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=639, earliestPutTs=1733883987991 2024-12-11T02:26:31,363 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d66c6e1cd4f0458abd684c9273181d62, keycount=150, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=639, earliestPutTs=1733883987991 2024-12-11T02:26:31,363 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21b20d96c3ed49cbad2a23e8d8434b8b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=655, earliestPutTs=1733883989137 2024-12-11T02:26:31,363 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 02efacd73fec413e987977e517c497b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=655, earliestPutTs=1733883989137 2024-12-11T02:26:31,363 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43451a20b5d341d085ad9f86e5f102ba, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=680, earliestPutTs=1733883990333 2024-12-11T02:26:31,363 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 49480c14d1724475a5d9cb13b9d4999a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=680, earliestPutTs=1733883990336 2024-12-11T02:26:31,373 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#B#compaction#141 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:31,374 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/7b6c2e3b794f45f5a9f3402fc69788eb is 50, key is test_row_0/B:col10/1733883990336/Put/seqid=0 2024-12-11T02:26:31,374 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#A#compaction#142 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:31,375 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/c7434dac3120411fa1814931adc874be is 50, key is test_row_0/A:col10/1733883990336/Put/seqid=0 2024-12-11T02:26:31,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741980_1156 (size=13901) 2024-12-11T02:26:31,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741981_1157 (size=13901) 2024-12-11T02:26:31,415 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/7b6c2e3b794f45f5a9f3402fc69788eb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/7b6c2e3b794f45f5a9f3402fc69788eb 2024-12-11T02:26:31,423 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/B of 422539d3733f091ff661b5e7e0fc5956 into 7b6c2e3b794f45f5a9f3402fc69788eb(size=13.6 K), total size for store is 13.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:31,423 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:31,423 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/B, priority=13, startTime=1733883991360; duration=0sec 2024-12-11T02:26:31,423 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:31,423 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:B 2024-12-11T02:26:31,424 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:31,426 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:31,426 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 422539d3733f091ff661b5e7e0fc5956/C is initiating minor compaction (all files) 2024-12-11T02:26:31,426 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 422539d3733f091ff661b5e7e0fc5956/C in TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:31,426 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0ab0c276637547ff888f043a9dfc647f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d023494905374ac68866ea5ee9910e9f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/dadab8c48a4f477aac2b4a80c4a20684] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp, totalSize=37.5 K 2024-12-11T02:26:31,426 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ab0c276637547ff888f043a9dfc647f, keycount=150, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=639, earliestPutTs=1733883987991 2024-12-11T02:26:31,428 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d023494905374ac68866ea5ee9910e9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=655, earliestPutTs=1733883989137 2024-12-11T02:26:31,428 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting dadab8c48a4f477aac2b4a80c4a20684, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=680, earliestPutTs=1733883990336 2024-12-11T02:26:31,440 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 422539d3733f091ff661b5e7e0fc5956#C#compaction#143 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:31,442 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/87cf69749f7b4fd694687a9a3157be4e is 50, key is test_row_0/C:col10/1733883990336/Put/seqid=0 2024-12-11T02:26:31,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741982_1158 (size=13901) 2024-12-11T02:26:31,481 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:31,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T02:26:31,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:31,482 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:26:31,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:31,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:31,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:31,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:31,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:31,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:31,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/be6f34bbfc03472faf2d6fcb4265ebc8 is 50, key is test_row_0/A:col10/1733883990461/Put/seqid=0 2024-12-11T02:26:31,494 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/87cf69749f7b4fd694687a9a3157be4e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/87cf69749f7b4fd694687a9a3157be4e 2024-12-11T02:26:31,504 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/C of 422539d3733f091ff661b5e7e0fc5956 into 87cf69749f7b4fd694687a9a3157be4e(size=13.6 K), total size for store is 13.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
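
Each store above became a minor-compaction candidate once it held three eligible files against a blocking threshold of sixteen ("3 eligible, 16 blocking"). A hedged sketch of how those thresholds are commonly adjusted programmatically follows; the same keys can equally be set in hbase-site.xml, and the values shown are illustrative defaults rather than anything this particular test configures.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {            // illustrative class name
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum / maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio used when exploring candidate file sets (see the "in ratio" entries above).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Store-file count at which writes to the region are blocked ("16 blocking" in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
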
2024-12-11T02:26:31,504 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:31,504 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/C, priority=13, startTime=1733883991360; duration=0sec 2024-12-11T02:26:31,504 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:31,504 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:C 2024-12-11T02:26:31,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741983_1159 (size=12301) 2024-12-11T02:26:31,507 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=694 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/be6f34bbfc03472faf2d6fcb4265ebc8 2024-12-11T02:26:31,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T02:26:31,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/1393c88419c9474e904c4a2287c7ad78 is 50, key is test_row_0/B:col10/1733883990461/Put/seqid=0 2024-12-11T02:26:31,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741984_1160 (size=12301) 2024-12-11T02:26:31,532 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=694 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/1393c88419c9474e904c4a2287c7ad78 2024-12-11T02:26:31,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/8805d15c71584080ae797bc7cc359112 is 50, key is test_row_0/C:col10/1733883990461/Put/seqid=0 2024-12-11T02:26:31,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741985_1161 (size=12301) 2024-12-11T02:26:31,558 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=694 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/8805d15c71584080ae797bc7cc359112 2024-12-11T02:26:31,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/be6f34bbfc03472faf2d6fcb4265ebc8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/be6f34bbfc03472faf2d6fcb4265ebc8 2024-12-11T02:26:31,572 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/be6f34bbfc03472faf2d6fcb4265ebc8, entries=150, sequenceid=694, filesize=12.0 K 2024-12-11T02:26:31,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/1393c88419c9474e904c4a2287c7ad78 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1393c88419c9474e904c4a2287c7ad78 2024-12-11T02:26:31,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:31,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
as already flushing 2024-12-11T02:26:31,584 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:63149 2024-12-11T02:26:31,584 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:31,584 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1393c88419c9474e904c4a2287c7ad78, entries=150, sequenceid=694, filesize=12.0 K 2024-12-11T02:26:31,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/8805d15c71584080ae797bc7cc359112 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8805d15c71584080ae797bc7cc359112 2024-12-11T02:26:31,588 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:63149 2024-12-11T02:26:31,588 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:31,590 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:63149 2024-12-11T02:26:31,590 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:31,591 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:63149 2024-12-11T02:26:31,591 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:31,592 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:63149 2024-12-11T02:26:31,592 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:31,592 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:63149 2024-12-11T02:26:31,592 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:31,596 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:63149 2024-12-11T02:26:31,596 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:31,597 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8805d15c71584080ae797bc7cc359112, entries=150, sequenceid=694, filesize=12.0 K 2024-12-11T02:26:31,598 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=26.84 KB/27480 for 422539d3733f091ff661b5e7e0fc5956 in 115ms, sequenceid=694, compaction requested=false 2024-12-11T02:26:31,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:31,598 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:31,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-11T02:26:31,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-11T02:26:31,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-11T02:26:31,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1910 sec 2024-12-11T02:26:31,603 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.1960 sec 2024-12-11T02:26:31,809 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/c7434dac3120411fa1814931adc874be as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c7434dac3120411fa1814931adc874be 2024-12-11T02:26:31,814 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 422539d3733f091ff661b5e7e0fc5956/A of 422539d3733f091ff661b5e7e0fc5956 into c7434dac3120411fa1814931adc874be(size=13.6 K), total size for store is 25.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:31,814 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:31,814 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956., storeName=422539d3733f091ff661b5e7e0fc5956/A, priority=13, startTime=1733883991360; duration=0sec 2024-12-11T02:26:31,814 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:31,814 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 422539d3733f091ff661b5e7e0fc5956:A 2024-12-11T02:26:32,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T02:26:32,512 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-11T02:26:32,932 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
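
The FlushTableProcedure that just finished (pid=26, reported to the client as "Operation: FLUSH ... procId: 26 completed") corresponds to an administrative flush request against TestAcidGuarantees. A minimal sketch of issuing the same request from client code, assuming a running cluster and the standard Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {                  // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; on this build the master
      // runs it as a FlushTableProcedure, like pid=26 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
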
2024-12-11T02:26:34,494 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:63149 2024-12-11T02:26:34,494 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:34,513 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:63149 2024-12-11T02:26:34,513 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 122 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 177 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5578 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5396 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2457 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7371 rows 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2433 2024-12-11T02:26:34,513 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7299 rows 2024-12-11T02:26:34,514 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T02:26:34,514 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:63149 2024-12-11T02:26:34,514 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:34,517 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T02:26:34,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T02:26:34,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:34,530 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733883994529"}]},"ts":"1733883994529"} 2024-12-11T02:26:34,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T02:26:34,531 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T02:26:34,533 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T02:26:34,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T02:26:34,539 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=30, ppid=29, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=422539d3733f091ff661b5e7e0fc5956, UNASSIGN}] 2024-12-11T02:26:34,540 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=30, ppid=29, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=422539d3733f091ff661b5e7e0fc5956, UNASSIGN 2024-12-11T02:26:34,540 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=30 updating hbase:meta row=422539d3733f091ff661b5e7e0fc5956, regionState=CLOSING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:34,541 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T02:26:34,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; CloseRegionProcedure 422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:26:34,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T02:26:34,697 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:34,699 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] handler.UnassignRegionHandler(124): Close 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:34,699 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T02:26:34,700 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1681): Closing 422539d3733f091ff661b5e7e0fc5956, disabling compactions & flushes 2024-12-11T02:26:34,700 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:34,700 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 2024-12-11T02:26:34,700 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. after waiting 0 ms 2024-12-11T02:26:34,700 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
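
The procedure chain above (DisableTableProcedure pid=28, CloseTableRegionsProcedure pid=29, TransitRegionStateProcedure pid=30, CloseRegionProcedure pid=31) is the usual teardown path once the test tool has printed its summary. A minimal sketch of the client call that starts it, again using the standard Admin API; deleting the table afterwards is shown only as a typical follow-up, not something this log has reached at this point.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {                // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // master schedules a DisableTableProcedure, as in pid=28 above
      }
      // A typical follow-up during test cleanup (not part of this log excerpt):
      // admin.deleteTable(table);
    }
  }
}
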
2024-12-11T02:26:34,700 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(2837): Flushing 422539d3733f091ff661b5e7e0fc5956 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-11T02:26:34,700 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=A 2024-12-11T02:26:34,701 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:34,701 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=B 2024-12-11T02:26:34,701 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:34,701 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 422539d3733f091ff661b5e7e0fc5956, store=C 2024-12-11T02:26:34,701 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:34,706 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/ac6b448a10d94bfeab8d1df9fa790034 is 50, key is test_row_0/A:col10/1733883991580/Put/seqid=0 2024-12-11T02:26:34,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741986_1162 (size=12301) 2024-12-11T02:26:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T02:26:35,111 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=705 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/ac6b448a10d94bfeab8d1df9fa790034 2024-12-11T02:26:35,119 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/a22ad1f4b30744d98518015c567ceb2d is 50, key is test_row_0/B:col10/1733883991580/Put/seqid=0 2024-12-11T02:26:35,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741987_1163 (size=12301) 2024-12-11T02:26:35,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T02:26:35,523 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 
{event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=705 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/a22ad1f4b30744d98518015c567ceb2d 2024-12-11T02:26:35,533 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/d371b9501fdb4c258e6cd10248767751 is 50, key is test_row_0/C:col10/1733883991580/Put/seqid=0 2024-12-11T02:26:35,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741988_1164 (size=12301) 2024-12-11T02:26:35,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T02:26:35,938 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=705 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/d371b9501fdb4c258e6cd10248767751 2024-12-11T02:26:35,944 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/A/ac6b448a10d94bfeab8d1df9fa790034 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/ac6b448a10d94bfeab8d1df9fa790034 2024-12-11T02:26:35,948 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/ac6b448a10d94bfeab8d1df9fa790034, entries=150, sequenceid=705, filesize=12.0 K 2024-12-11T02:26:35,949 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/B/a22ad1f4b30744d98518015c567ceb2d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/a22ad1f4b30744d98518015c567ceb2d 2024-12-11T02:26:35,954 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/a22ad1f4b30744d98518015c567ceb2d, entries=150, sequenceid=705, filesize=12.0 K 2024-12-11T02:26:35,954 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/.tmp/C/d371b9501fdb4c258e6cd10248767751 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d371b9501fdb4c258e6cd10248767751 2024-12-11T02:26:35,959 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d371b9501fdb4c258e6cd10248767751, entries=150, sequenceid=705, filesize=12.0 K 2024-12-11T02:26:35,960 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 422539d3733f091ff661b5e7e0fc5956 in 1259ms, sequenceid=705, compaction requested=true 2024-12-11T02:26:35,960 DEBUG [StoreCloser-TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/e62ffbdcd51d4002ba1b0433c9785f16, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9466f3a5fc7c49bfb7ee51e8f461125c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8773f3edbc5c4fc690f2a8ae25b9e0fc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21c86aa6b1cf4365a4512b76632a56d8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c58fd00a6a75413a8d37af531d784862, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/5288bbc23a284a48b19b3135db2aa695, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d7e14a3580dd4ff69e36972b9aa46a48, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/06c23403c85f4c06a6cbd25581a86ada, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/cb17666e57fb46d4a69db893da02d75e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f70d47315944415d948824d791f4c21f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b5c021239c9249689a2c82cefd93a1df, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/59370aa358f94bffaf2d417a034f2ebc, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9b817c1730af4c1a820eec9a5d0cdf7b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b86a9c26315d4f01bc7d48cfdea7bc6e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/215dc3d983cf4192987e56d812bb1e08, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/fdb22b1470be4be2a6a8b79f12f992ac, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7938759c0dd4448785bbb5cb3b768d8c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4372c41fa36e469e98ba8d5197a1aa66, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/699004d0b12d43d4bffd97ffb5a283cb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0685798a1fa34aa394e0ac195a2df046, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0fe8b0e0806144958ef147b0d1f6c455, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/23cafecaf6e74d92b6196dde10ab02f0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7f783c895d3b40e4b31e951514709b64, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/764f2ff017f242efb2fa6d9fff26dae2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2075bf5cc8a54469a6eaca5942961d38, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/141a9a5c2e044f0db8b0bbeb50658e8e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2d3d78e8467a41b986f8c76039002120, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f90ea02a916f4489905ac190081113ae, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/26653e8f2e9e4b6a8990a934839d7b76, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/770a37dd1b874375b119ce5b5107f79c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/761e117a9ec0408c83e1b2d6bfbfce7e, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/459df187d0ba4ec9a7de492853092ae5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7874325c3b754c27a0de538c6585f40d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d1073e5200514bea8ed68787adbba8de, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4a6969b91372408bbfa4571e35498f57, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c48e92fcc9f24a6288149846225df8a9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/45da21b47870457e84cb3a5a425939d8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d93f5404afc04f65bf02a385039aaab9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4e8900f8ad95411eb9b7440f29948e12, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/bb8b2aa269cd44f08e12534bedb010cb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d2ece349cfaa46a3985c29076a2ebbec, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/89f09c4b7a36467494c7615a29a84853, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/3f17227cc3fc4d22bb840d1a1b705e8c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0a63f8002e7d40e79fe59def7f0e546f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8eacbf072d3c4e71a0b59f5d218fd8f3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21b20d96c3ed49cbad2a23e8d8434b8b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/43451a20b5d341d085ad9f86e5f102ba] to archive 2024-12-11T02:26:35,964 DEBUG [StoreCloser-TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-11T02:26:35,973 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8773f3edbc5c4fc690f2a8ae25b9e0fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8773f3edbc5c4fc690f2a8ae25b9e0fc 2024-12-11T02:26:35,973 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9466f3a5fc7c49bfb7ee51e8f461125c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9466f3a5fc7c49bfb7ee51e8f461125c 2024-12-11T02:26:35,973 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c58fd00a6a75413a8d37af531d784862 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c58fd00a6a75413a8d37af531d784862 2024-12-11T02:26:35,974 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/e62ffbdcd51d4002ba1b0433c9785f16 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/e62ffbdcd51d4002ba1b0433c9785f16 2024-12-11T02:26:35,974 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21c86aa6b1cf4365a4512b76632a56d8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21c86aa6b1cf4365a4512b76632a56d8 2024-12-11T02:26:35,974 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d7e14a3580dd4ff69e36972b9aa46a48 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d7e14a3580dd4ff69e36972b9aa46a48 2024-12-11T02:26:35,975 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/5288bbc23a284a48b19b3135db2aa695 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/5288bbc23a284a48b19b3135db2aa695 2024-12-11T02:26:35,980 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f70d47315944415d948824d791f4c21f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f70d47315944415d948824d791f4c21f 2024-12-11T02:26:35,981 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/06c23403c85f4c06a6cbd25581a86ada to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/06c23403c85f4c06a6cbd25581a86ada 2024-12-11T02:26:35,981 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/cb17666e57fb46d4a69db893da02d75e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/cb17666e57fb46d4a69db893da02d75e 2024-12-11T02:26:35,981 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b5c021239c9249689a2c82cefd93a1df to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b5c021239c9249689a2c82cefd93a1df 2024-12-11T02:26:35,981 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/215dc3d983cf4192987e56d812bb1e08 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/215dc3d983cf4192987e56d812bb1e08 2024-12-11T02:26:35,981 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/59370aa358f94bffaf2d417a034f2ebc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/59370aa358f94bffaf2d417a034f2ebc 2024-12-11T02:26:35,981 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9b817c1730af4c1a820eec9a5d0cdf7b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/9b817c1730af4c1a820eec9a5d0cdf7b 2024-12-11T02:26:35,982 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b86a9c26315d4f01bc7d48cfdea7bc6e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/b86a9c26315d4f01bc7d48cfdea7bc6e 2024-12-11T02:26:35,983 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0685798a1fa34aa394e0ac195a2df046 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0685798a1fa34aa394e0ac195a2df046 2024-12-11T02:26:35,983 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/23cafecaf6e74d92b6196dde10ab02f0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/23cafecaf6e74d92b6196dde10ab02f0 2024-12-11T02:26:35,984 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4372c41fa36e469e98ba8d5197a1aa66 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4372c41fa36e469e98ba8d5197a1aa66 2024-12-11T02:26:35,984 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/fdb22b1470be4be2a6a8b79f12f992ac to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/fdb22b1470be4be2a6a8b79f12f992ac 2024-12-11T02:26:35,985 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7f783c895d3b40e4b31e951514709b64 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7f783c895d3b40e4b31e951514709b64 2024-12-11T02:26:35,985 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2075bf5cc8a54469a6eaca5942961d38 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2075bf5cc8a54469a6eaca5942961d38 2024-12-11T02:26:35,985 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0fe8b0e0806144958ef147b0d1f6c455 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0fe8b0e0806144958ef147b0d1f6c455 2024-12-11T02:26:35,986 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/764f2ff017f242efb2fa6d9fff26dae2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/764f2ff017f242efb2fa6d9fff26dae2 2024-12-11T02:26:35,986 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/699004d0b12d43d4bffd97ffb5a283cb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/699004d0b12d43d4bffd97ffb5a283cb 2024-12-11T02:26:35,987 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/141a9a5c2e044f0db8b0bbeb50658e8e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/141a9a5c2e044f0db8b0bbeb50658e8e 2024-12-11T02:26:35,986 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7938759c0dd4448785bbb5cb3b768d8c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7938759c0dd4448785bbb5cb3b768d8c 2024-12-11T02:26:35,987 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2d3d78e8467a41b986f8c76039002120 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/2d3d78e8467a41b986f8c76039002120 2024-12-11T02:26:35,987 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f90ea02a916f4489905ac190081113ae to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/f90ea02a916f4489905ac190081113ae 2024-12-11T02:26:35,989 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/26653e8f2e9e4b6a8990a934839d7b76 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/26653e8f2e9e4b6a8990a934839d7b76 2024-12-11T02:26:35,989 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/761e117a9ec0408c83e1b2d6bfbfce7e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/761e117a9ec0408c83e1b2d6bfbfce7e 2024-12-11T02:26:35,989 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/770a37dd1b874375b119ce5b5107f79c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/770a37dd1b874375b119ce5b5107f79c 2024-12-11T02:26:35,990 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c48e92fcc9f24a6288149846225df8a9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c48e92fcc9f24a6288149846225df8a9 2024-12-11T02:26:35,990 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4a6969b91372408bbfa4571e35498f57 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4a6969b91372408bbfa4571e35498f57 2024-12-11T02:26:35,990 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/459df187d0ba4ec9a7de492853092ae5 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/459df187d0ba4ec9a7de492853092ae5 2024-12-11T02:26:35,990 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d1073e5200514bea8ed68787adbba8de to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d1073e5200514bea8ed68787adbba8de 2024-12-11T02:26:35,991 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/45da21b47870457e84cb3a5a425939d8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/45da21b47870457e84cb3a5a425939d8 2024-12-11T02:26:35,991 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7874325c3b754c27a0de538c6585f40d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/7874325c3b754c27a0de538c6585f40d 2024-12-11T02:26:35,991 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d93f5404afc04f65bf02a385039aaab9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d93f5404afc04f65bf02a385039aaab9 2024-12-11T02:26:35,992 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4e8900f8ad95411eb9b7440f29948e12 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/4e8900f8ad95411eb9b7440f29948e12 2024-12-11T02:26:35,992 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d2ece349cfaa46a3985c29076a2ebbec to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/d2ece349cfaa46a3985c29076a2ebbec 2024-12-11T02:26:35,993 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/bb8b2aa269cd44f08e12534bedb010cb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/bb8b2aa269cd44f08e12534bedb010cb 2024-12-11T02:26:35,994 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/89f09c4b7a36467494c7615a29a84853 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/89f09c4b7a36467494c7615a29a84853 2024-12-11T02:26:35,994 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/3f17227cc3fc4d22bb840d1a1b705e8c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/3f17227cc3fc4d22bb840d1a1b705e8c 2024-12-11T02:26:35,994 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21b20d96c3ed49cbad2a23e8d8434b8b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/21b20d96c3ed49cbad2a23e8d8434b8b 2024-12-11T02:26:35,994 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0a63f8002e7d40e79fe59def7f0e546f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/0a63f8002e7d40e79fe59def7f0e546f 2024-12-11T02:26:35,994 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8eacbf072d3c4e71a0b59f5d218fd8f3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/8eacbf072d3c4e71a0b59f5d218fd8f3 2024-12-11T02:26:35,994 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/43451a20b5d341d085ad9f86e5f102ba to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/43451a20b5d341d085ad9f86e5f102ba 2024-12-11T02:26:36,012 DEBUG [StoreCloser-TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6f963a4fe50247d28916067fb847b06a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/982cdd6602e341d89ab28c566f5b4e6b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ae26cfad364c4d1dabf92856b72d7c8b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/14088e5b0e6f45caaf7a2fc6eed4d3a3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/5bfb029ec38d465783025a5ea71dc752, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/7f5cbcd6b40a4b5fa47fa05abb9acebf, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/da6b928ff11c449093c6b22eef965a1e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1f022733ac1c4089a982137093ac0d93, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f78a35c0d05e4a0c84cfb6b657c00347, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1ffa7641ec224dad9796d220a63cacb6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/2ebe61bd92b44f4f89e30b8b9d8b3f29, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/0df88f1bca1c47fcb5cb2677f4ebe899, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ba9bd185b2fb4081a0ebfc23d479116b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/68aaaa7b118f46829131c9f7c4ab9001, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b314120db62b428abce5f47870cdc0ea, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4c9d1223076549a98b5f90cfa40bacb6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b5ad281445f34ae8acb7742fd0ebfded, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8b8f823b8447448389e1665cca132a85, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/9f9848ff18f34e2285315167c5766b9b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6c997fa72f08456db0a3a58d289fbb89, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/225bb268542e4d85847a572d6d0eacff, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb1ffb1b19464146af2b61b6cf8922a3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/afc07a5fecd642d5ba6170438bc24218, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d5a491ad5fc642e69ca838cd279f587b, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f092e29194564f7380772d77feb76ab4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b495fd5b643c49d38ca24ab555736503, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f64cb4a72e884849924b4a56363963a3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/955cfe48ec2b4de29a2220f542b0d79a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb7ce8c6ad7b46a3a6e9cbca4252a455, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6adeee4da88d40f6ab40888666fc6e77, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/00ce75f7f8fb4186845ae4754dccbe46, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/c37b6a11acf146fd9bc4605173b74592, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/50430165b1114cfd910e9e4b8b0db2c3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/46a32f20c2c84227a6d682ba6c46c0de, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8749e95ed05a4902a5b4c4854e6ef724, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/eaec0ce6246048a98dcf8a10fd52921c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ed56ff45653c40dfba24c5e9fe0f84cc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ef67560add9a4b2088e32709cb122623, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b541dbeec4344800826afbd7ca6f4e39, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b95a17ee4e344d7eb999ba92cce1971d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4645fb24ea2e479c9de5a0b61d7ba50b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/3d00bfa3d01c464caf3dee2e0308500e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/dca396eefb3946af922a5c0272d8130d, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d66c6e1cd4f0458abd684c9273181d62, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/e6725f25cbde4a638c5975690c434407, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/02efacd73fec413e987977e517c497b7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/49480c14d1724475a5d9cb13b9d4999a] to archive 2024-12-11T02:26:36,014 DEBUG [StoreCloser-TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T02:26:36,018 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ae26cfad364c4d1dabf92856b72d7c8b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ae26cfad364c4d1dabf92856b72d7c8b 2024-12-11T02:26:36,018 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/5bfb029ec38d465783025a5ea71dc752 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/5bfb029ec38d465783025a5ea71dc752 2024-12-11T02:26:36,018 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/14088e5b0e6f45caaf7a2fc6eed4d3a3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/14088e5b0e6f45caaf7a2fc6eed4d3a3 2024-12-11T02:26:36,018 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/7f5cbcd6b40a4b5fa47fa05abb9acebf to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/7f5cbcd6b40a4b5fa47fa05abb9acebf 2024-12-11T02:26:36,018 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6f963a4fe50247d28916067fb847b06a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6f963a4fe50247d28916067fb847b06a 2024-12-11T02:26:36,018 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1f022733ac1c4089a982137093ac0d93 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1f022733ac1c4089a982137093ac0d93 2024-12-11T02:26:36,020 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1ffa7641ec224dad9796d220a63cacb6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1ffa7641ec224dad9796d220a63cacb6 2024-12-11T02:26:36,020 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/da6b928ff11c449093c6b22eef965a1e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/da6b928ff11c449093c6b22eef965a1e 2024-12-11T02:26:36,020 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ba9bd185b2fb4081a0ebfc23d479116b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ba9bd185b2fb4081a0ebfc23d479116b 2024-12-11T02:26:36,021 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/0df88f1bca1c47fcb5cb2677f4ebe899 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/0df88f1bca1c47fcb5cb2677f4ebe899 2024-12-11T02:26:36,021 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/68aaaa7b118f46829131c9f7c4ab9001 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/68aaaa7b118f46829131c9f7c4ab9001 2024-12-11T02:26:36,021 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f78a35c0d05e4a0c84cfb6b657c00347 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f78a35c0d05e4a0c84cfb6b657c00347 2024-12-11T02:26:36,021 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/982cdd6602e341d89ab28c566f5b4e6b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/982cdd6602e341d89ab28c566f5b4e6b 2024-12-11T02:26:36,022 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/2ebe61bd92b44f4f89e30b8b9d8b3f29 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/2ebe61bd92b44f4f89e30b8b9d8b3f29 2024-12-11T02:26:36,023 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b314120db62b428abce5f47870cdc0ea to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b314120db62b428abce5f47870cdc0ea 2024-12-11T02:26:36,024 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b5ad281445f34ae8acb7742fd0ebfded to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b5ad281445f34ae8acb7742fd0ebfded 2024-12-11T02:26:36,024 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6c997fa72f08456db0a3a58d289fbb89 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6c997fa72f08456db0a3a58d289fbb89 2024-12-11T02:26:36,024 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8b8f823b8447448389e1665cca132a85 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8b8f823b8447448389e1665cca132a85 2024-12-11T02:26:36,026 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/9f9848ff18f34e2285315167c5766b9b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/9f9848ff18f34e2285315167c5766b9b 2024-12-11T02:26:36,026 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb1ffb1b19464146af2b61b6cf8922a3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb1ffb1b19464146af2b61b6cf8922a3 2024-12-11T02:26:36,026 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4c9d1223076549a98b5f90cfa40bacb6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4c9d1223076549a98b5f90cfa40bacb6 2024-12-11T02:26:36,027 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/225bb268542e4d85847a572d6d0eacff to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/225bb268542e4d85847a572d6d0eacff 2024-12-11T02:26:36,028 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/afc07a5fecd642d5ba6170438bc24218 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/afc07a5fecd642d5ba6170438bc24218 2024-12-11T02:26:36,028 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d5a491ad5fc642e69ca838cd279f587b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d5a491ad5fc642e69ca838cd279f587b 2024-12-11T02:26:36,029 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f092e29194564f7380772d77feb76ab4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f092e29194564f7380772d77feb76ab4 2024-12-11T02:26:36,030 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/955cfe48ec2b4de29a2220f542b0d79a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/955cfe48ec2b4de29a2220f542b0d79a 2024-12-11T02:26:36,030 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/00ce75f7f8fb4186845ae4754dccbe46 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/00ce75f7f8fb4186845ae4754dccbe46 2024-12-11T02:26:36,030 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb7ce8c6ad7b46a3a6e9cbca4252a455 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/cb7ce8c6ad7b46a3a6e9cbca4252a455 2024-12-11T02:26:36,030 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b495fd5b643c49d38ca24ab555736503 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b495fd5b643c49d38ca24ab555736503 2024-12-11T02:26:36,031 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/c37b6a11acf146fd9bc4605173b74592 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/c37b6a11acf146fd9bc4605173b74592 2024-12-11T02:26:36,031 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/50430165b1114cfd910e9e4b8b0db2c3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/50430165b1114cfd910e9e4b8b0db2c3 2024-12-11T02:26:36,032 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6adeee4da88d40f6ab40888666fc6e77 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/6adeee4da88d40f6ab40888666fc6e77 2024-12-11T02:26:36,032 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f64cb4a72e884849924b4a56363963a3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/f64cb4a72e884849924b4a56363963a3 2024-12-11T02:26:36,033 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/eaec0ce6246048a98dcf8a10fd52921c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/eaec0ce6246048a98dcf8a10fd52921c 2024-12-11T02:26:36,033 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/46a32f20c2c84227a6d682ba6c46c0de to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/46a32f20c2c84227a6d682ba6c46c0de 2024-12-11T02:26:36,033 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ef67560add9a4b2088e32709cb122623 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ef67560add9a4b2088e32709cb122623 2024-12-11T02:26:36,033 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8749e95ed05a4902a5b4c4854e6ef724 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/8749e95ed05a4902a5b4c4854e6ef724 2024-12-11T02:26:36,033 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ed56ff45653c40dfba24c5e9fe0f84cc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/ed56ff45653c40dfba24c5e9fe0f84cc 2024-12-11T02:26:36,034 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b541dbeec4344800826afbd7ca6f4e39 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b541dbeec4344800826afbd7ca6f4e39 2024-12-11T02:26:36,036 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/dca396eefb3946af922a5c0272d8130d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/dca396eefb3946af922a5c0272d8130d 2024-12-11T02:26:36,036 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b95a17ee4e344d7eb999ba92cce1971d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/b95a17ee4e344d7eb999ba92cce1971d 2024-12-11T02:26:36,037 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d66c6e1cd4f0458abd684c9273181d62 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/d66c6e1cd4f0458abd684c9273181d62 2024-12-11T02:26:36,037 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/3d00bfa3d01c464caf3dee2e0308500e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/3d00bfa3d01c464caf3dee2e0308500e 2024-12-11T02:26:36,037 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/e6725f25cbde4a638c5975690c434407 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/e6725f25cbde4a638c5975690c434407 2024-12-11T02:26:36,037 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4645fb24ea2e479c9de5a0b61d7ba50b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/4645fb24ea2e479c9de5a0b61d7ba50b 2024-12-11T02:26:36,037 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/02efacd73fec413e987977e517c497b7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/02efacd73fec413e987977e517c497b7 2024-12-11T02:26:36,037 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/49480c14d1724475a5d9cb13b9d4999a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/49480c14d1724475a5d9cb13b9d4999a 2024-12-11T02:26:36,039 DEBUG [StoreCloser-TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/1b6449bef95c43c8b5bc38e705c75dc9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/38b059b159ec4211b85f20086534ced5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/41fc4d25ee1442ddb7b3a9520d35ead7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/f4fb3a15e30244bda564ea2022d1ad25, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3d074bda511f4ca1b43dd17ac00753be, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d27ab1af78384da4a025c0fc883ecc27, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8145e623584e446aadffe66803004499, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/95d2c00ebae44b55b9f08ba01dcebb27, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8d133dadc4df4353bff395ebb1ce4901, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/ce30034d54034cf981e191616bfe81ce, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a314f77b630d4887bd9148b707d2adf1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/da6a5615cae04b9f9d645ebdbc5fdc97, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b50965091c94458395f7deaeac10e917, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3ce40ce569324bd8a72fd9ae1421d007, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/9853472c21ce468daa05b5e3c207e9ec, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b4f8a5b8e4fa44dda74443df6ed59b55, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6913f90924104a5aa1e1e01a9f123277, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/02e5da6f34b244618664fe086acea155, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/619528ab4f504ba4b1f1becf386f6276, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/c5ae27af21c04545a4139e0a21fa7193, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/62002c025f8340fdb6e34658e3e6efe4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/def099453b7647e1850ba2373a63180b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/274f85fecec24b3baacee988f12904b1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/24fbf2c100d444509b3ec246c39f6eba, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0e832a919c4841ae81939ad372e65975, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/5eb56391ec0047a1b16b3f2a6b81b9cc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3c5eca28920d4389a7ebd8c1046ba770, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/29c4cfef3f0742e4aab0bf13f7c4e851, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6bbf63e671214843b3e5d011e6b87b42, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/7f10ea355fca44179e2f3c420f6b1362, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/436c83b913c4467ca616baa1308deed6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/45b0b6ca6c53452997bc1ad39ca2884f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/050df7010743427f864e313b32ce83ee, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/280cb76f67eb4e4786de7eba3b837e6e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/08cc35c32afc4c01b73b710e62ab9ba5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2f209482e36e40edb06ebb33551fe709, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/438a423e9f444a588251ef75a0a890d8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a8317d6036b4410c8ae41b8d92ca0289, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/799a59b149324ac69e7ca49b5d9fc2bf, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2a9a3366b226420f9ead1dde6b932084, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/00993a93fd2a4b768b1c6becf3a569d4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a90a370a81494047a003d7e3f574a92a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b92c590d94bf4c3d9d6b2d13cb1cd66f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0ab0c276637547ff888f043a9dfc647f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/93e26657e212403ea4b3fe009a4367f2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d023494905374ac68866ea5ee9910e9f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/dadab8c48a4f477aac2b4a80c4a20684] to archive 2024-12-11T02:26:36,040 DEBUG [StoreCloser-TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-11T02:26:36,044 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/38b059b159ec4211b85f20086534ced5 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/38b059b159ec4211b85f20086534ced5 2024-12-11T02:26:36,044 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/41fc4d25ee1442ddb7b3a9520d35ead7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/41fc4d25ee1442ddb7b3a9520d35ead7 2024-12-11T02:26:36,044 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/1b6449bef95c43c8b5bc38e705c75dc9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/1b6449bef95c43c8b5bc38e705c75dc9 2024-12-11T02:26:36,044 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d27ab1af78384da4a025c0fc883ecc27 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d27ab1af78384da4a025c0fc883ecc27 2024-12-11T02:26:36,044 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/f4fb3a15e30244bda564ea2022d1ad25 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/f4fb3a15e30244bda564ea2022d1ad25 2024-12-11T02:26:36,044 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/95d2c00ebae44b55b9f08ba01dcebb27 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/95d2c00ebae44b55b9f08ba01dcebb27 2024-12-11T02:26:36,047 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/ce30034d54034cf981e191616bfe81ce to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/ce30034d54034cf981e191616bfe81ce 2024-12-11T02:26:36,047 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3d074bda511f4ca1b43dd17ac00753be to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3d074bda511f4ca1b43dd17ac00753be 2024-12-11T02:26:36,047 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a314f77b630d4887bd9148b707d2adf1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a314f77b630d4887bd9148b707d2adf1 2024-12-11T02:26:36,047 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b50965091c94458395f7deaeac10e917 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b50965091c94458395f7deaeac10e917 2024-12-11T02:26:36,048 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/da6a5615cae04b9f9d645ebdbc5fdc97 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/da6a5615cae04b9f9d645ebdbc5fdc97 2024-12-11T02:26:36,048 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3ce40ce569324bd8a72fd9ae1421d007 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3ce40ce569324bd8a72fd9ae1421d007 2024-12-11T02:26:36,050 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8d133dadc4df4353bff395ebb1ce4901 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8d133dadc4df4353bff395ebb1ce4901 2024-12-11T02:26:36,051 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/9853472c21ce468daa05b5e3c207e9ec to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/9853472c21ce468daa05b5e3c207e9ec 2024-12-11T02:26:36,051 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/c5ae27af21c04545a4139e0a21fa7193 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/c5ae27af21c04545a4139e0a21fa7193 2024-12-11T02:26:36,052 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6913f90924104a5aa1e1e01a9f123277 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6913f90924104a5aa1e1e01a9f123277 2024-12-11T02:26:36,053 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/62002c025f8340fdb6e34658e3e6efe4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/62002c025f8340fdb6e34658e3e6efe4 2024-12-11T02:26:36,053 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b4f8a5b8e4fa44dda74443df6ed59b55 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b4f8a5b8e4fa44dda74443df6ed59b55 2024-12-11T02:26:36,054 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/02e5da6f34b244618664fe086acea155 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/02e5da6f34b244618664fe086acea155 2024-12-11T02:26:36,054 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/619528ab4f504ba4b1f1becf386f6276 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/619528ab4f504ba4b1f1becf386f6276 2024-12-11T02:26:36,055 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/def099453b7647e1850ba2373a63180b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/def099453b7647e1850ba2373a63180b 2024-12-11T02:26:36,055 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/274f85fecec24b3baacee988f12904b1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/274f85fecec24b3baacee988f12904b1 2024-12-11T02:26:36,055 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8145e623584e446aadffe66803004499 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8145e623584e446aadffe66803004499 2024-12-11T02:26:36,055 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/24fbf2c100d444509b3ec246c39f6eba to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/24fbf2c100d444509b3ec246c39f6eba 2024-12-11T02:26:36,056 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0e832a919c4841ae81939ad372e65975 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0e832a919c4841ae81939ad372e65975 2024-12-11T02:26:36,057 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3c5eca28920d4389a7ebd8c1046ba770 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/3c5eca28920d4389a7ebd8c1046ba770 2024-12-11T02:26:36,057 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/5eb56391ec0047a1b16b3f2a6b81b9cc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/5eb56391ec0047a1b16b3f2a6b81b9cc 2024-12-11T02:26:36,060 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/7f10ea355fca44179e2f3c420f6b1362 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/7f10ea355fca44179e2f3c420f6b1362 2024-12-11T02:26:36,060 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6bbf63e671214843b3e5d011e6b87b42 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/6bbf63e671214843b3e5d011e6b87b42 2024-12-11T02:26:36,060 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/436c83b913c4467ca616baa1308deed6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/436c83b913c4467ca616baa1308deed6 2024-12-11T02:26:36,060 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/050df7010743427f864e313b32ce83ee to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/050df7010743427f864e313b32ce83ee 2024-12-11T02:26:36,060 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/45b0b6ca6c53452997bc1ad39ca2884f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/45b0b6ca6c53452997bc1ad39ca2884f 2024-12-11T02:26:36,062 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/280cb76f67eb4e4786de7eba3b837e6e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/280cb76f67eb4e4786de7eba3b837e6e 2024-12-11T02:26:36,062 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/08cc35c32afc4c01b73b710e62ab9ba5 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/08cc35c32afc4c01b73b710e62ab9ba5 2024-12-11T02:26:36,063 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/438a423e9f444a588251ef75a0a890d8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/438a423e9f444a588251ef75a0a890d8 2024-12-11T02:26:36,063 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/799a59b149324ac69e7ca49b5d9fc2bf to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/799a59b149324ac69e7ca49b5d9fc2bf 2024-12-11T02:26:36,063 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/29c4cfef3f0742e4aab0bf13f7c4e851 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/29c4cfef3f0742e4aab0bf13f7c4e851 2024-12-11T02:26:36,063 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a8317d6036b4410c8ae41b8d92ca0289 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a8317d6036b4410c8ae41b8d92ca0289 2024-12-11T02:26:36,063 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2f209482e36e40edb06ebb33551fe709 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2f209482e36e40edb06ebb33551fe709 2024-12-11T02:26:36,063 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2a9a3366b226420f9ead1dde6b932084 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/2a9a3366b226420f9ead1dde6b932084 2024-12-11T02:26:36,066 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/00993a93fd2a4b768b1c6becf3a569d4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/00993a93fd2a4b768b1c6becf3a569d4 2024-12-11T02:26:36,066 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0ab0c276637547ff888f043a9dfc647f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/0ab0c276637547ff888f043a9dfc647f 2024-12-11T02:26:36,066 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d023494905374ac68866ea5ee9910e9f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d023494905374ac68866ea5ee9910e9f 2024-12-11T02:26:36,066 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/dadab8c48a4f477aac2b4a80c4a20684 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/dadab8c48a4f477aac2b4a80c4a20684 2024-12-11T02:26:36,067 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b92c590d94bf4c3d9d6b2d13cb1cd66f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/b92c590d94bf4c3d9d6b2d13cb1cd66f 2024-12-11T02:26:36,067 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a90a370a81494047a003d7e3f574a92a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/a90a370a81494047a003d7e3f574a92a 2024-12-11T02:26:36,067 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/93e26657e212403ea4b3fe009a4367f2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/93e26657e212403ea4b3fe009a4367f2 2024-12-11T02:26:36,076 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/recovered.edits/708.seqid, newMaxSeqId=708, maxSeqId=1 2024-12-11T02:26:36,079 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956. 
2024-12-11T02:26:36,080 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] regionserver.HRegion(1635): Region close journal for 422539d3733f091ff661b5e7e0fc5956: 2024-12-11T02:26:36,082 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=31}] handler.UnassignRegionHandler(170): Closed 422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:36,082 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=30 updating hbase:meta row=422539d3733f091ff661b5e7e0fc5956, regionState=CLOSED 2024-12-11T02:26:36,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-11T02:26:36,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseRegionProcedure 422539d3733f091ff661b5e7e0fc5956, server=5f57a24c5131,40311,1733883964600 in 1.5420 sec 2024-12-11T02:26:36,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=30, resume processing ppid=29 2024-12-11T02:26:36,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, ppid=29, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=422539d3733f091ff661b5e7e0fc5956, UNASSIGN in 1.5460 sec 2024-12-11T02:26:36,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-11T02:26:36,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5540 sec 2024-12-11T02:26:36,090 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733883996090"}]},"ts":"1733883996090"} 2024-12-11T02:26:36,091 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T02:26:36,097 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T02:26:36,099 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5740 sec 2024-12-11T02:26:36,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T02:26:36,634 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-11T02:26:36,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T02:26:36,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:36,644 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=32, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:36,645 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=32, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:36,645 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-11T02:26:36,648 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:36,652 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/recovered.edits] 2024-12-11T02:26:36,655 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/ac6b448a10d94bfeab8d1df9fa790034 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/ac6b448a10d94bfeab8d1df9fa790034 2024-12-11T02:26:36,655 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c7434dac3120411fa1814931adc874be to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/c7434dac3120411fa1814931adc874be 2024-12-11T02:26:36,655 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/be6f34bbfc03472faf2d6fcb4265ebc8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/A/be6f34bbfc03472faf2d6fcb4265ebc8 2024-12-11T02:26:36,659 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1393c88419c9474e904c4a2287c7ad78 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/1393c88419c9474e904c4a2287c7ad78 2024-12-11T02:26:36,659 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/a22ad1f4b30744d98518015c567ceb2d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/a22ad1f4b30744d98518015c567ceb2d 
2024-12-11T02:26:36,659 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/7b6c2e3b794f45f5a9f3402fc69788eb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/B/7b6c2e3b794f45f5a9f3402fc69788eb 2024-12-11T02:26:36,663 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8805d15c71584080ae797bc7cc359112 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/8805d15c71584080ae797bc7cc359112 2024-12-11T02:26:36,663 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/87cf69749f7b4fd694687a9a3157be4e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/87cf69749f7b4fd694687a9a3157be4e 2024-12-11T02:26:36,663 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d371b9501fdb4c258e6cd10248767751 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/C/d371b9501fdb4c258e6cd10248767751 2024-12-11T02:26:36,666 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/recovered.edits/708.seqid to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956/recovered.edits/708.seqid 2024-12-11T02:26:36,667 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/422539d3733f091ff661b5e7e0fc5956 2024-12-11T02:26:36,667 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T02:26:36,673 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=32, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:36,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-11T02:26:36,680 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T02:26:36,718 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-11T02:26:36,720 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=32, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:36,720 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-11T02:26:36,721 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733883996720"}]},"ts":"9223372036854775807"} 2024-12-11T02:26:36,724 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T02:26:36,724 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 422539d3733f091ff661b5e7e0fc5956, NAME => 'TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T02:26:36,724 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-11T02:26:36,724 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733883996724"}]},"ts":"9223372036854775807"} 2024-12-11T02:26:36,727 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T02:26:36,729 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=32, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:36,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 90 msec 2024-12-11T02:26:36,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-11T02:26:36,747 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-11T02:26:36,765 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=245 (was 219) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2d5916a5-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;5f57a24c5131:40311-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_440387272_22 at /127.0.0.1:47576 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2d5916a5-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2d5916a5-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2d5916a5-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=268 (was 125) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4529 (was 5060) 2024-12-11T02:26:36,778 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=245, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=268, ProcessCount=11, AvailableMemoryMB=4528 2024-12-11T02:26:36,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-11T02:26:36,781 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:26:36,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=33, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:36,783 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T02:26:36,783 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:36,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 33 2024-12-11T02:26:36,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-11T02:26:36,784 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T02:26:36,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741989_1165 (size=963) 2024-12-11T02:26:36,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-11T02:26:37,087 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-11T02:26:37,194 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 2024-12-11T02:26:37,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741990_1166 (size=53) 2024-12-11T02:26:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-11T02:26:37,601 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:37,601 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ccefedb36bdc39d0abb7cf1c7bd657fc, disabling compactions & flushes 2024-12-11T02:26:37,601 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:37,601 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:37,601 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. after waiting 0 ms 2024-12-11T02:26:37,601 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:37,601 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
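The create request logged at 02:26:36,781 builds TestAcidGuarantees with three column families (A, B, C), VERSIONS => '1', and the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. As a rough sketch, the equivalent HBase 2.x Admin client calls would look like the following; the connection setup and the explicit 131072-byte flush size (the value that triggers the TableDescriptorChecker warning above) are illustrative assumptions, not the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level metadata seen in the create request above.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
              // 128 KB flush size; small values trigger the TableDescriptorChecker warning.
              .setMemStoreFlushSize(131072L);
      // Families A, B, C with a single version and otherwise default settings.
      for (String family : new String[] { "A", "B", "C" }) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)
                .build());
      }
      // Blocks until the master finishes the CreateTableProcedure (pid=33 in the log).
      admin.createTable(table.build());
    }
  }
}
```

The repeated "Checking to see if procedure is done pid=33" DEBUG lines below correspond to the client polling the master until such a create call returns.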
2024-12-11T02:26:37,601 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:37,603 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T02:26:37,603 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733883997603"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733883997603"}]},"ts":"1733883997603"} 2024-12-11T02:26:37,604 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T02:26:37,605 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T02:26:37,605 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733883997605"}]},"ts":"1733883997605"} 2024-12-11T02:26:37,606 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T02:26:37,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, ASSIGN}] 2024-12-11T02:26:37,610 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, ASSIGN 2024-12-11T02:26:37,611 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, ASSIGN; state=OFFLINE, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=false 2024-12-11T02:26:37,676 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T02:26:37,678 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60884, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T02:26:37,762 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=ccefedb36bdc39d0abb7cf1c7bd657fc, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:37,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; OpenRegionProcedure ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:26:37,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-11T02:26:37,915 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
5f57a24c5131,40311,1733883964600 2024-12-11T02:26:37,919 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:37,919 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(7285): Opening region: {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:26:37,920 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:37,920 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:37,920 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(7327): checking encryption for ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:37,920 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(7330): checking classloading for ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:37,921 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:37,923 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:26:37,923 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ccefedb36bdc39d0abb7cf1c7bd657fc columnFamilyName A 2024-12-11T02:26:37,923 DEBUG [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:37,923 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(327): Store=ccefedb36bdc39d0abb7cf1c7bd657fc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-12-11T02:26:37,924 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:37,925 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:26:37,925 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ccefedb36bdc39d0abb7cf1c7bd657fc columnFamilyName B 2024-12-11T02:26:37,926 DEBUG [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:37,926 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(327): Store=ccefedb36bdc39d0abb7cf1c7bd657fc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:37,926 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:37,927 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:26:37,928 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ccefedb36bdc39d0abb7cf1c7bd657fc columnFamilyName C 2024-12-11T02:26:37,928 DEBUG [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:37,928 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(327): Store=ccefedb36bdc39d0abb7cf1c7bd657fc/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:37,928 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:37,929 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:37,929 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:37,931 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:26:37,932 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1085): writing seq id for ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:37,934 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T02:26:37,935 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1102): Opened ccefedb36bdc39d0abb7cf1c7bd657fc; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66840082, jitterRate=-0.004005163908004761}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:26:37,936 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegion(1001): Region open journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:37,937 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., pid=35, masterSystemTime=1733883997915 2024-12-11T02:26:37,938 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:37,938 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=35}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:37,939 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=ccefedb36bdc39d0abb7cf1c7bd657fc, regionState=OPEN, openSeqNum=2, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:37,942 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-11T02:26:37,942 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; OpenRegionProcedure ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 in 177 msec 2024-12-11T02:26:37,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-12-11T02:26:37,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, ASSIGN in 333 msec 2024-12-11T02:26:37,944 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T02:26:37,944 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733883997944"}]},"ts":"1733883997944"} 2024-12-11T02:26:37,945 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T02:26:37,948 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=33, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T02:26:37,950 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1670 sec 2024-12-11T02:26:38,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=33 2024-12-11T02:26:38,890 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 33 completed 2024-12-11T02:26:38,891 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26401a5f to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@407e6b5c 2024-12-11T02:26:38,895 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eb305fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:38,898 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:38,900 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:38,902 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T02:26:38,904 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60896, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T02:26:38,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-11T02:26:38,910 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:26:38,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-11T02:26:38,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741991_1167 (size=999) 2024-12-11T02:26:39,335 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-11T02:26:39,336 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-11T02:26:39,339 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T02:26:39,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, REOPEN/MOVE}] 2024-12-11T02:26:39,348 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, REOPEN/MOVE 2024-12-11T02:26:39,349 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=ccefedb36bdc39d0abb7cf1c7bd657fc, regionState=CLOSING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:39,350 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T02:26:39,350 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:26:39,502 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:39,502 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,503 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T02:26:39,503 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing ccefedb36bdc39d0abb7cf1c7bd657fc, disabling compactions & flushes 2024-12-11T02:26:39,503 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:39,503 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:39,503 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. after waiting 0 ms 2024-12-11T02:26:39,503 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:39,507 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-11T02:26:39,508 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:39,508 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:39,508 WARN [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionServer(3786): Not adding moved region record: ccefedb36bdc39d0abb7cf1c7bd657fc to self. 2024-12-11T02:26:39,509 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,510 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=ccefedb36bdc39d0abb7cf1c7bd657fc, regionState=CLOSED 2024-12-11T02:26:39,512 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-11T02:26:39,512 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 in 161 msec 2024-12-11T02:26:39,512 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, REOPEN/MOVE; state=CLOSED, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=true 2024-12-11T02:26:39,663 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=ccefedb36bdc39d0abb7cf1c7bd657fc, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:39,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=38, state=RUNNABLE; OpenRegionProcedure ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:26:39,816 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:39,820 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:39,820 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7285): Opening region: {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:26:39,820 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,820 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:26:39,820 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7327): checking encryption for ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,820 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7330): checking classloading for ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,823 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,823 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:26:39,828 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ccefedb36bdc39d0abb7cf1c7bd657fc columnFamilyName A 2024-12-11T02:26:39,830 DEBUG [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:39,831 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(327): Store=ccefedb36bdc39d0abb7cf1c7bd657fc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:39,831 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,832 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:26:39,832 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ccefedb36bdc39d0abb7cf1c7bd657fc columnFamilyName B 2024-12-11T02:26:39,832 DEBUG [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:39,833 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(327): Store=ccefedb36bdc39d0abb7cf1c7bd657fc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:39,833 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,834 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:26:39,834 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ccefedb36bdc39d0abb7cf1c7bd657fc columnFamilyName C 2024-12-11T02:26:39,834 DEBUG [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:39,835 INFO [StoreOpener-ccefedb36bdc39d0abb7cf1c7bd657fc-1 {}] regionserver.HStore(327): Store=ccefedb36bdc39d0abb7cf1c7bd657fc/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:26:39,835 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:39,836 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,837 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,838 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:26:39,839 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1085): writing seq id for ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,840 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1102): Opened ccefedb36bdc39d0abb7cf1c7bd657fc; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73435353, jitterRate=0.09427203238010406}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:26:39,842 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1001): Region open journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:39,843 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., pid=40, masterSystemTime=1733883999816 2024-12-11T02:26:39,844 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:39,844 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
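The store-opener records above show the three column families A, B and C of TestAcidGuarantees each coming back up with a CompactingMemStore (in-memory flush threshold 2.00 MB, ADAPTIVE compactor) after the table was modified. As a rough sketch only of how a table of that shape can be declared through the HBase 2.x Admin API — the table and family names come from this log, while the class, configuration source and call sequence are illustrative assumptions rather than the test's actual code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch: declare TestAcidGuarantees with families A, B, C using
    // ADAPTIVE in-memory compaction, matching the CompactingMemStore lines logged above.
    public class AdaptiveMemstoreTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          for (String family : new String[] {"A", "B", "C"}) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build());
          }
          // modifyTable drives a ModifyTableProcedure, which reopens the region
          // (the REOPEN/MOVE transition recorded in this section of the log).
          admin.modifyTable(tdb.build());
        }
      }
    }
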
2024-12-11T02:26:39,845 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=ccefedb36bdc39d0abb7cf1c7bd657fc, regionState=OPEN, openSeqNum=5, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:39,847 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=38 2024-12-11T02:26:39,847 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; OpenRegionProcedure ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 in 182 msec 2024-12-11T02:26:39,849 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-11T02:26:39,849 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, REOPEN/MOVE in 500 msec 2024-12-11T02:26:39,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-11T02:26:39,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 512 msec 2024-12-11T02:26:39,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 940 msec 2024-12-11T02:26:39,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-11T02:26:39,863 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba 2024-12-11T02:26:39,869 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:39,870 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e3a4420 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ebda6ad 2024-12-11T02:26:39,874 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:39,875 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-12-11T02:26:39,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46114993, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:39,882 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 
127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-12-11T02:26:39,886 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f0be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:39,887 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22e911df to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78cafade 2024-12-11T02:26:39,893 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@152377d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:39,895 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14c16cd4 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a52344f 2024-12-11T02:26:39,898 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3448d233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:39,899 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0341384e to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8ba8425 2024-12-11T02:26:39,902 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a11164b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:39,904 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b120d9 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7af61386 2024-12-11T02:26:39,906 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a7e1dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:39,908 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c1ec7ee to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63e87c8 2024-12-11T02:26:39,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31a027db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:26:39,916 DEBUG 
[hconnection-0x3266889-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:39,916 DEBUG [hconnection-0x50d38d10-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:39,916 DEBUG [hconnection-0x19340437-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:39,916 DEBUG [hconnection-0x37ab4144-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:39,917 DEBUG [hconnection-0x3313d567-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:39,918 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40072, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:39,918 DEBUG [hconnection-0x3947021a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:39,918 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:39,919 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:39,919 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40084, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:39,919 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:39,920 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:39,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:39,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees 2024-12-11T02:26:39,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-11T02:26:39,928 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:39,929 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:39,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:39,934 DEBUG [hconnection-0x17b22f8e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:39,934 DEBUG [hconnection-0x6124160f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:39,935 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40130, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:39,935 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:39,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:39,939 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:26:39,941 DEBUG [hconnection-0x1dce59c6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:26:39,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:39,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:39,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:39,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:39,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:39,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:39,944 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40148, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:26:39,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884059988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:39,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:39,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884059993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412116bc9a6b59a424273ab61b7d4bfc2fa2b_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733883999938/Put/seqid=0 2024-12-11T02:26:40,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884059995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,007 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884059995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884059997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-11T02:26:40,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741992_1168 (size=12154) 2024-12-11T02:26:40,047 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:40,054 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412116bc9a6b59a424273ab61b7d4bfc2fa2b_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116bc9a6b59a424273ab61b7d4bfc2fa2b_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:40,056 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/d3d9b06a77eb4cb790e30cb0c9d16a33, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:40,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/d3d9b06a77eb4cb790e30cb0c9d16a33 is 175, key is test_row_0/A:col10/1733883999938/Put/seqid=0 2024-12-11T02:26:40,082 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:40,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:40,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:40,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
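The pid=42 FlushRegionCallable above fails with "Unable to complete flush" because the MemStoreFlusher is already flushing this region; the master records the remote failure just below and re-dispatches the same pid later in this section. The request that started the parent FlushTableProcedure (pid=41, "Client=jenkins//172.17.0.2 flush TestAcidGuarantees") corresponds to an Admin-level flush; a minimal assumed sketch of such a call, not the test's own code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative sketch: ask for a flush of every region of TestAcidGuarantees.
    // On a master that supports it, the request is carried out by a FlushTableProcedure
    // with one FlushRegionProcedure per region, the pid=41 / pid=42 pair seen in this log.
    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
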
2024-12-11T02:26:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884060097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884060102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884060108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884060109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884060111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741993_1169 (size=30955) 2024-12-11T02:26:40,115 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/d3d9b06a77eb4cb790e30cb0c9d16a33 2024-12-11T02:26:40,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/2ebd5e36c21b4e4fb508a9ee00d4b2fb is 50, key is test_row_0/B:col10/1733883999938/Put/seqid=0 2024-12-11T02:26:40,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741994_1170 (size=12001) 2024-12-11T02:26:40,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-11T02:26:40,236 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:40,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:40,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
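While that flush is re-dispatched (above and below), the writers keep being rejected with RegionTooBusyException ("Over memstore limit=512.0 K"); each rejected Mutate shows up as a WARN followed by the callId/deadline DEBUG line. A caller outside this harness would normally retry such puts with backoff. A hedged sketch assuming a plain single-put writer — the row (test_row_0), family (A) and qualifier (col10) are taken from the keys logged above, the retry policy itself is an assumption:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch: retry a put a few times when the region pushes back
    // because its memstore is over the blocking limit, backing off between attempts.
    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;                            // write accepted
            } catch (IOException e) {
              // In this log the underlying cause is RegionTooBusyException
              // ("Over memstore limit"); the client may also surface it wrapped
              // in its own retry exceptions.
              if (attempt >= 5) throw e;        // give up after a few tries
              Thread.sleep(100L * attempt);     // simple linear backoff
            }
          }
        }
      }
    }
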
2024-12-11T02:26:40,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884060303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884060304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884060328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884060329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884060329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,390 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:40,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:40,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-11T02:26:40,544 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:40,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:40,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/2ebd5e36c21b4e4fb508a9ee00d4b2fb 2024-12-11T02:26:40,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884060609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884060629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884060634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884060634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:40,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884060637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/36621148923f46e3b4925181948817e1 is 50, key is test_row_0/C:col10/1733883999938/Put/seqid=0 2024-12-11T02:26:40,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741995_1171 (size=12001) 2024-12-11T02:26:40,703 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:40,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:40,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:40,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,856 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:40,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:40,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:40,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:40,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:40,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,011 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:41,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:41,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-11T02:26:41,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/36621148923f46e3b4925181948817e1 2024-12-11T02:26:41,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/d3d9b06a77eb4cb790e30cb0c9d16a33 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/d3d9b06a77eb4cb790e30cb0c9d16a33 2024-12-11T02:26:41,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/d3d9b06a77eb4cb790e30cb0c9d16a33, entries=150, sequenceid=17, filesize=30.2 K 2024-12-11T02:26:41,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/2ebd5e36c21b4e4fb508a9ee00d4b2fb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/2ebd5e36c21b4e4fb508a9ee00d4b2fb 2024-12-11T02:26:41,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/2ebd5e36c21b4e4fb508a9ee00d4b2fb, entries=150, sequenceid=17, filesize=11.7 K 2024-12-11T02:26:41,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/36621148923f46e3b4925181948817e1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/36621148923f46e3b4925181948817e1 2024-12-11T02:26:41,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/36621148923f46e3b4925181948817e1, entries=150, sequenceid=17, filesize=11.7 K 2024-12-11T02:26:41,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for ccefedb36bdc39d0abb7cf1c7bd657fc in 1149ms, sequenceid=17, compaction requested=false 2024-12-11T02:26:41,088 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-11T02:26:41,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:41,124 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:26:41,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:41,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:41,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:41,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:41,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:41,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:41,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211514000ccb2e54b6a8b9eea7a76f71f53_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733883999991/Put/seqid=0 2024-12-11T02:26:41,164 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:41,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:41,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,165 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:41,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741996_1172 (size=14594) 2024-12-11T02:26:41,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884061158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884061158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884061160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,171 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:41,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,177 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211514000ccb2e54b6a8b9eea7a76f71f53_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211514000ccb2e54b6a8b9eea7a76f71f53_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:41,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884061170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,180 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c0c55be63af64c34b14a5efd8198e7cb, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:41,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884061158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c0c55be63af64c34b14a5efd8198e7cb is 175, key is test_row_0/A:col10/1733883999991/Put/seqid=0 2024-12-11T02:26:41,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741997_1173 (size=39549) 2024-12-11T02:26:41,206 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c0c55be63af64c34b14a5efd8198e7cb 2024-12-11T02:26:41,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a09b5b5a37bc466eb0debcf10c836c27 is 50, key is test_row_0/B:col10/1733883999991/Put/seqid=0 2024-12-11T02:26:41,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741998_1174 (size=12001) 2024-12-11T02:26:41,234 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a09b5b5a37bc466eb0debcf10c836c27 2024-12-11T02:26:41,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/55658bf27cbe40d4803a8c481cacae48 is 50, key is test_row_0/C:col10/1733883999991/Put/seqid=0 2024-12-11T02:26:41,253 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741999_1175 (size=12001) 2024-12-11T02:26:41,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/55658bf27cbe40d4803a8c481cacae48 2024-12-11T02:26:41,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c0c55be63af64c34b14a5efd8198e7cb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0c55be63af64c34b14a5efd8198e7cb 2024-12-11T02:26:41,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0c55be63af64c34b14a5efd8198e7cb, entries=200, sequenceid=42, filesize=38.6 K 2024-12-11T02:26:41,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884061272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a09b5b5a37bc466eb0debcf10c836c27 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a09b5b5a37bc466eb0debcf10c836c27 2024-12-11T02:26:41,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a09b5b5a37bc466eb0debcf10c836c27, entries=150, sequenceid=42, filesize=11.7 K 2024-12-11T02:26:41,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/55658bf27cbe40d4803a8c481cacae48 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/55658bf27cbe40d4803a8c481cacae48 2024-12-11T02:26:41,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884061272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884061272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/55658bf27cbe40d4803a8c481cacae48, entries=150, sequenceid=42, filesize=11.7 K 2024-12-11T02:26:41,291 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for ccefedb36bdc39d0abb7cf1c7bd657fc in 166ms, sequenceid=42, compaction requested=false 2024-12-11T02:26:41,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:41,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:41,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T02:26:41,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:41,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:41,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:41,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:41,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:41,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:41,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115998667be231465196dd82da950299dc_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884001298/Put/seqid=0 2024-12-11T02:26:41,319 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:41,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:41,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,320 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:41,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:41,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742000_1176 (size=17034) 2024-12-11T02:26:41,340 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:41,347 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115998667be231465196dd82da950299dc_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115998667be231465196dd82da950299dc_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:41,349 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/5d43ed1375c347819ff5be16ba453c96, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:41,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/5d43ed1375c347819ff5be16ba453c96 is 175, key is test_row_0/A:col10/1733884001298/Put/seqid=0 2024-12-11T02:26:41,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742001_1177 (size=48139) 2024-12-11T02:26:41,366 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=58, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/5d43ed1375c347819ff5be16ba453c96 2024-12-11T02:26:41,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/8f0c524aafe64ab786729904ca52ddaa is 50, key is test_row_0/B:col10/1733884001298/Put/seqid=0 2024-12-11T02:26:41,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742002_1178 (size=12001) 2024-12-11T02:26:41,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/8f0c524aafe64ab786729904ca52ddaa 2024-12-11T02:26:41,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884061398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884061399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/b7947152e1e643709afdcfc2119786ab is 50, key is test_row_0/C:col10/1733884001298/Put/seqid=0 2024-12-11T02:26:41,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742003_1179 (size=12001) 2024-12-11T02:26:41,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/b7947152e1e643709afdcfc2119786ab 2024-12-11T02:26:41,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/5d43ed1375c347819ff5be16ba453c96 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5d43ed1375c347819ff5be16ba453c96 2024-12-11T02:26:41,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5d43ed1375c347819ff5be16ba453c96, entries=250, sequenceid=58, filesize=47.0 K 2024-12-11T02:26:41,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/8f0c524aafe64ab786729904ca52ddaa as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/8f0c524aafe64ab786729904ca52ddaa 2024-12-11T02:26:41,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/8f0c524aafe64ab786729904ca52ddaa, entries=150, sequenceid=58, filesize=11.7 K 2024-12-11T02:26:41,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/b7947152e1e643709afdcfc2119786ab as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b7947152e1e643709afdcfc2119786ab 2024-12-11T02:26:41,473 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:41,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:41,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884061474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b7947152e1e643709afdcfc2119786ab, entries=150, sequenceid=58, filesize=11.7 K 2024-12-11T02:26:41,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for ccefedb36bdc39d0abb7cf1c7bd657fc in 179ms, sequenceid=58, compaction requested=true 2024-12-11T02:26:41,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:41,479 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:41,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:41,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:41,481 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:41,481 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:41,481 
INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,481 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/d3d9b06a77eb4cb790e30cb0c9d16a33, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0c55be63af64c34b14a5efd8198e7cb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5d43ed1375c347819ff5be16ba453c96] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=115.9 K 2024-12-11T02:26:41,481 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,481 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/d3d9b06a77eb4cb790e30cb0c9d16a33, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0c55be63af64c34b14a5efd8198e7cb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5d43ed1375c347819ff5be16ba453c96] 2024-12-11T02:26:41,482 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3d9b06a77eb4cb790e30cb0c9d16a33, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733883999933 2024-12-11T02:26:41,483 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:41,483 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0c55be63af64c34b14a5efd8198e7cb, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733883999987 2024-12-11T02:26:41,484 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d43ed1375c347819ff5be16ba453c96, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733884001154 2024-12-11T02:26:41,484 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:41,484 DEBUG 
[RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:41,485 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,485 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/2ebd5e36c21b4e4fb508a9ee00d4b2fb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a09b5b5a37bc466eb0debcf10c836c27, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/8f0c524aafe64ab786729904ca52ddaa] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=35.2 K 2024-12-11T02:26:41,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:41,485 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ebd5e36c21b4e4fb508a9ee00d4b2fb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733883999933 2024-12-11T02:26:41,486 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a09b5b5a37bc466eb0debcf10c836c27, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733883999987 2024-12-11T02:26:41,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:41,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:41,487 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f0c524aafe64ab786729904ca52ddaa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733884001290 2024-12-11T02:26:41,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:41,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:41,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T02:26:41,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:41,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-11T02:26:41,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:41,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:41,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:41,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:41,509 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:41,518 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#160 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:41,519 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/051d85e4b91b4a8198dc6ca0a41ed570 is 50, key is test_row_0/B:col10/1733884001298/Put/seqid=0 2024-12-11T02:26:41,522 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211f19d9dfa9d424c7d8bc4fd42996ec37b_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:41,537 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211f19d9dfa9d424c7d8bc4fd42996ec37b_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:41,537 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f19d9dfa9d424c7d8bc4fd42996ec37b_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:41,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f710dfd99cd14be3897be069de6b6af5_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884001495/Put/seqid=0 2024-12-11T02:26:41,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742004_1180 (size=12104) 2024-12-11T02:26:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742005_1181 (size=4469) 2024-12-11T02:26:41,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46759 is added to blk_1073742006_1182 (size=14594) 2024-12-11T02:26:41,560 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#159 average throughput is 0.48 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:41,562 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/47438701b69e49b1bca852844feec8ca is 175, key is test_row_0/A:col10/1733884001298/Put/seqid=0 2024-12-11T02:26:41,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742007_1183 (size=31058) 2024-12-11T02:26:41,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884061532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884061586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884061586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884061586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:41,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:41,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,645 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T02:26:41,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884061688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884061696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884061696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884061698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,782 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:41,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:41,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:41,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884061781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:41,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884061893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884061903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884061904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:41,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884061905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,934 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:41,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:41,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:41,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:41,957 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/051d85e4b91b4a8198dc6ca0a41ed570 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/051d85e4b91b4a8198dc6ca0a41ed570 2024-12-11T02:26:41,961 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:41,969 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into 051d85e4b91b4a8198dc6ca0a41ed570(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:41,969 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:41,969 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=13, startTime=1733884001482; duration=0sec 2024-12-11T02:26:41,969 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:41,969 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:41,970 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:41,973 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f710dfd99cd14be3897be069de6b6af5_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211f710dfd99cd14be3897be069de6b6af5_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:41,975 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/43ccf72006d742639405cd6d382d4f97, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:41,975 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:41,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/43ccf72006d742639405cd6d382d4f97 is 175, key is test_row_0/A:col10/1733884001495/Put/seqid=0 2024-12-11T02:26:41,976 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:41,976 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:41,979 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/36621148923f46e3b4925181948817e1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/55658bf27cbe40d4803a8c481cacae48, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b7947152e1e643709afdcfc2119786ab] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=35.2 K 2024-12-11T02:26:41,980 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 36621148923f46e3b4925181948817e1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733883999933 2024-12-11T02:26:41,980 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/47438701b69e49b1bca852844feec8ca as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/47438701b69e49b1bca852844feec8ca 2024-12-11T02:26:41,981 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 55658bf27cbe40d4803a8c481cacae48, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733883999987 2024-12-11T02:26:41,982 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b7947152e1e643709afdcfc2119786ab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733884001290 2024-12-11T02:26:41,987 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into 47438701b69e49b1bca852844feec8ca(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:41,987 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:41,987 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=13, startTime=1733884001479; duration=0sec 2024-12-11T02:26:41,988 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:41,988 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:42,017 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#162 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:42,018 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/722dc311b1f348668e870c3089651dee is 50, key is test_row_0/C:col10/1733884001298/Put/seqid=0 2024-12-11T02:26:42,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742008_1184 (size=39549) 2024-12-11T02:26:42,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-11T02:26:42,043 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=82, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/43ccf72006d742639405cd6d382d4f97 2024-12-11T02:26:42,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/05703ce21baf47c2971431c83bc88e22 is 50, key is test_row_0/B:col10/1733884001495/Put/seqid=0 2024-12-11T02:26:42,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742009_1185 (size=12104) 2024-12-11T02:26:42,077 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/722dc311b1f348668e870c3089651dee as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/722dc311b1f348668e870c3089651dee 2024-12-11T02:26:42,084 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into 722dc311b1f348668e870c3089651dee(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:42,084 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:42,084 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=13, startTime=1733884001489; duration=0sec 2024-12-11T02:26:42,085 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:42,085 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:26:42,089 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:42,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:42,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:42,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:42,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742010_1186 (size=12001) 2024-12-11T02:26:42,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/05703ce21baf47c2971431c83bc88e22 2024-12-11T02:26:42,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/0c89c1b0ff484294b6d8f478ae8c888a is 50, key is test_row_0/C:col10/1733884001495/Put/seqid=0 2024-12-11T02:26:42,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742011_1187 (size=12001) 2024-12-11T02:26:42,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884062196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884062206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884062206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884062209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,243 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:42,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:42,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:42,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:42,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:42,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884062286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,397 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:42,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:42,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:42,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:42,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/0c89c1b0ff484294b6d8f478ae8c888a 2024-12-11T02:26:42,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/43ccf72006d742639405cd6d382d4f97 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/43ccf72006d742639405cd6d382d4f97 2024-12-11T02:26:42,552 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
as already flushing 2024-12-11T02:26:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:42,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:42,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/43ccf72006d742639405cd6d382d4f97, entries=200, sequenceid=82, filesize=38.6 K 2024-12-11T02:26:42,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/05703ce21baf47c2971431c83bc88e22 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/05703ce21baf47c2971431c83bc88e22 2024-12-11T02:26:42,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/05703ce21baf47c2971431c83bc88e22, entries=150, sequenceid=82, filesize=11.7 K 2024-12-11T02:26:42,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/0c89c1b0ff484294b6d8f478ae8c888a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/0c89c1b0ff484294b6d8f478ae8c888a 2024-12-11T02:26:42,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/0c89c1b0ff484294b6d8f478ae8c888a, entries=150, sequenceid=82, filesize=11.7 K 2024-12-11T02:26:42,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ccefedb36bdc39d0abb7cf1c7bd657fc in 1094ms, sequenceid=82, compaction requested=false 2024-12-11T02:26:42,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,705 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-11T02:26:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:42,706 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:26:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:42,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
as already flushing 2024-12-11T02:26:42,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:42,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:42,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:42,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:42,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:42,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b85c8a1c6d304441a92cd3afdcfab41f_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884001536/Put/seqid=0 2024-12-11T02:26:42,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T02:26:42,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884062745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884062746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884062749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884062751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742012_1188 (size=12154) 2024-12-11T02:26:42,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:42,773 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b85c8a1c6d304441a92cd3afdcfab41f_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211b85c8a1c6d304441a92cd3afdcfab41f_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:42,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/8480fe36c375418eb4e3b793508616d9, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:42,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/8480fe36c375418eb4e3b793508616d9 is 175, key is test_row_0/A:col10/1733884001536/Put/seqid=0 2024-12-11T02:26:42,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742013_1189 (size=30955) 2024-12-11T02:26:42,809 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=98, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/8480fe36c375418eb4e3b793508616d9 
2024-12-11T02:26:42,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a02596e1762d4c98bf4c490d13741a31 is 50, key is test_row_0/B:col10/1733884001536/Put/seqid=0 2024-12-11T02:26:42,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884062852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884062853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742014_1190 (size=12001) 2024-12-11T02:26:42,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884062865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884062865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884063059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884063059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884063070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884063072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,263 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a02596e1762d4c98bf4c490d13741a31 2024-12-11T02:26:43,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/af641234e9c447eda49a85763dd8d8d3 is 50, key is test_row_0/C:col10/1733884001536/Put/seqid=0 2024-12-11T02:26:43,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884063288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742015_1191 (size=12001) 2024-12-11T02:26:43,312 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/af641234e9c447eda49a85763dd8d8d3 2024-12-11T02:26:43,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/8480fe36c375418eb4e3b793508616d9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8480fe36c375418eb4e3b793508616d9 2024-12-11T02:26:43,326 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8480fe36c375418eb4e3b793508616d9, entries=150, sequenceid=98, filesize=30.2 K 2024-12-11T02:26:43,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a02596e1762d4c98bf4c490d13741a31 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a02596e1762d4c98bf4c490d13741a31 2024-12-11T02:26:43,335 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a02596e1762d4c98bf4c490d13741a31, entries=150, sequenceid=98, filesize=11.7 K 2024-12-11T02:26:43,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/af641234e9c447eda49a85763dd8d8d3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/af641234e9c447eda49a85763dd8d8d3 2024-12-11T02:26:43,343 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/af641234e9c447eda49a85763dd8d8d3, entries=150, sequenceid=98, filesize=11.7 K 2024-12-11T02:26:43,346 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for ccefedb36bdc39d0abb7cf1c7bd657fc in 640ms, sequenceid=98, compaction requested=true 2024-12-11T02:26:43,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:43,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:43,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-12-11T02:26:43,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=42 2024-12-11T02:26:43,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-11T02:26:43,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4200 sec 2024-12-11T02:26:43,353 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees in 3.4260 sec 2024-12-11T02:26:43,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:43,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:26:43,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:43,370 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:43,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884063384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884063385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884063385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884063385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121177887b81b3bc496a8f3306ad806d1237_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884003368/Put/seqid=0 2024-12-11T02:26:43,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742016_1192 (size=14594) 2024-12-11T02:26:43,445 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:43,450 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121177887b81b3bc496a8f3306ad806d1237_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121177887b81b3bc496a8f3306ad806d1237_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:43,451 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:43,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff is 175, key is test_row_0/A:col10/1733884003368/Put/seqid=0 2024-12-11T02:26:43,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884063488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884063489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884063489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884063489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742017_1193 (size=39549) 2024-12-11T02:26:43,499 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=124, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff 2024-12-11T02:26:43,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/7987a03df6fa490888399f43479f9fe4 is 50, key is test_row_0/B:col10/1733884003368/Put/seqid=0 2024-12-11T02:26:43,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742018_1194 (size=12001) 2024-12-11T02:26:43,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/7987a03df6fa490888399f43479f9fe4 2024-12-11T02:26:43,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/7f6a18c2eb974f069a9092960c8a1b05 is 50, key is test_row_0/C:col10/1733884003368/Put/seqid=0 2024-12-11T02:26:43,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742019_1195 (size=12001) 2024-12-11T02:26:43,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=124 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/7f6a18c2eb974f069a9092960c8a1b05 2024-12-11T02:26:43,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff 2024-12-11T02:26:43,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff, entries=200, sequenceid=124, filesize=38.6 K 2024-12-11T02:26:43,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/7987a03df6fa490888399f43479f9fe4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7987a03df6fa490888399f43479f9fe4 2024-12-11T02:26:43,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7987a03df6fa490888399f43479f9fe4, entries=150, sequenceid=124, filesize=11.7 K 2024-12-11T02:26:43,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/7f6a18c2eb974f069a9092960c8a1b05 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7f6a18c2eb974f069a9092960c8a1b05 2024-12-11T02:26:43,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7f6a18c2eb974f069a9092960c8a1b05, entries=150, sequenceid=124, filesize=11.7 K 2024-12-11T02:26:43,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ccefedb36bdc39d0abb7cf1c7bd657fc in 275ms, sequenceid=124, compaction requested=true 2024-12-11T02:26:43,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:43,645 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:43,647 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141111 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:43,647 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] 
regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:43,647 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:43,648 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/47438701b69e49b1bca852844feec8ca, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/43ccf72006d742639405cd6d382d4f97, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8480fe36c375418eb4e3b793508616d9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=137.8 K 2024-12-11T02:26:43,648 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:43,648 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/47438701b69e49b1bca852844feec8ca, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/43ccf72006d742639405cd6d382d4f97, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8480fe36c375418eb4e3b793508616d9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff] 2024-12-11T02:26:43,648 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47438701b69e49b1bca852844feec8ca, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733884001290 2024-12-11T02:26:43,649 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43ccf72006d742639405cd6d382d4f97, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733884001353 2024-12-11T02:26:43,649 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8480fe36c375418eb4e3b793508616d9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733884001527 2024-12-11T02:26:43,650 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe3ff39bb9a74f50a4bd9dadf2f5e0ff, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884002744 2024-12-11T02:26:43,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:43,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:43,657 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:43,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:43,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:43,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:43,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:43,659 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 
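Editorial note (not part of the original log): the ExploringCompactionPolicy entries above show the region server repeatedly selecting all four eligible store files for a minor compaction as flushes pile up ("4 files ... 3 permutations with 3 in ratio"). As a hedged illustration only, the sketch below shows how a test harness might set the standard compaction-selection knobs before starting a cluster. The property names (hbase.hstore.compaction.min, hbase.hstore.compaction.max, hbase.hstore.compaction.ratio) are well-known HBase configuration keys, but their use here is an assumption; the log does not reveal what this test actually configured.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: compaction-selection knobs consulted by the exploring compaction policy.
// The values below are illustrative only; the log does not show the test's real settings.
public class CompactionTuningSketch {
  public static Configuration compactionTunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible files before a minor compaction is considered.
    conf.setInt("hbase.hstore.compaction.min", 4);
    // Upper bound on the number of files merged in a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // A file is "in ratio" if its size <= ratio * sum(sizes of smaller files),
    // which is the condition behind the "in ratio" wording in the selection lines above.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    return conf;
  }
}
```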
2024-12-11T02:26:43,659 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:43,659 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:43,659 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/051d85e4b91b4a8198dc6ca0a41ed570, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/05703ce21baf47c2971431c83bc88e22, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a02596e1762d4c98bf4c490d13741a31, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7987a03df6fa490888399f43479f9fe4] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=47.0 K 2024-12-11T02:26:43,661 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 051d85e4b91b4a8198dc6ca0a41ed570, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733884001290 2024-12-11T02:26:43,661 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 05703ce21baf47c2971431c83bc88e22, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733884001353 2024-12-11T02:26:43,661 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a02596e1762d4c98bf4c490d13741a31, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733884001527 2024-12-11T02:26:43,662 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 7987a03df6fa490888399f43479f9fe4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884002744 2024-12-11T02:26:43,680 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:43,682 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#171 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:43,683 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/68088123d257426faf22cf5c2e69a454 is 50, key is test_row_0/B:col10/1733884003368/Put/seqid=0 2024-12-11T02:26:43,684 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211e4523ebe7b4546969d65b37db0b67228_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:43,688 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211e4523ebe7b4546969d65b37db0b67228_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:43,688 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e4523ebe7b4546969d65b37db0b67228_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:43,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:43,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:26:43,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:43,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:43,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:43,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:43,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:43,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:43,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742021_1197 (size=4469) 2024-12-11T02:26:43,736 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#172 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:43,737 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/44b29db4356a4de7a50da4e226b0eacb is 175, key is test_row_0/A:col10/1733884003368/Put/seqid=0 2024-12-11T02:26:43,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742020_1196 (size=12241) 2024-12-11T02:26:43,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ae7653b2d05c4ce6b045577e1086f380_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884003698/Put/seqid=0 2024-12-11T02:26:43,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884063779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884063782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884063783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884063783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742022_1198 (size=31195) 2024-12-11T02:26:43,803 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/44b29db4356a4de7a50da4e226b0eacb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/44b29db4356a4de7a50da4e226b0eacb 2024-12-11T02:26:43,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742023_1199 (size=14794) 2024-12-11T02:26:43,806 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:43,810 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into 44b29db4356a4de7a50da4e226b0eacb(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:43,810 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:43,810 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=12, startTime=1733884003645; duration=0sec 2024-12-11T02:26:43,810 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:43,810 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:43,810 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:43,814 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ae7653b2d05c4ce6b045577e1086f380_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211ae7653b2d05c4ce6b045577e1086f380_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:43,815 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0009e5cd59204a0485599c3c75eeb78e, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:43,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0009e5cd59204a0485599c3c75eeb78e is 175, key is test_row_0/A:col10/1733884003698/Put/seqid=0 2024-12-11T02:26:43,817 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:43,817 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:43,817 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
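Editorial note (not part of the original log): most of the WARN traffic in this section is RegionTooBusyException "Over memstore limit=512.0 K", thrown from HRegion.checkResources while flushes and compactions catch up with the write load. In HBase this blocking threshold is normally the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, so the 512 K figure suggests a deliberately tiny flush size in this test; that relationship is stated from general HBase knowledge, not from anything in the log. The sketch below is a minimal, assumption-laden example of how a writer like the TestAcidGuarantees load clients might back off and retry when this exception surfaces. The row, family, and retry parameters mirror values visible in the log but the code is purely illustrative, and note that the stock HBase client already performs its own internal retries, so explicit handling like this would normally only matter once those retries are exhausted or disabled.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: retry a single Put with exponential backoff when the region reports
// it is over its memstore blocking limit. Retry policy and value are illustrative.
public class BackoffPutSketch {
  public static void putWithBackoff(Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit; wait for flushes to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new RegionTooBusyException("region stayed busy after retries");
    }
  }

  public static void main(String[] args) throws Exception {
    putWithBackoff(HBaseConfiguration.create());
  }
}
```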
2024-12-11T02:26:43,817 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/722dc311b1f348668e870c3089651dee, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/0c89c1b0ff484294b6d8f478ae8c888a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/af641234e9c447eda49a85763dd8d8d3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7f6a18c2eb974f069a9092960c8a1b05] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=47.0 K 2024-12-11T02:26:43,819 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 722dc311b1f348668e870c3089651dee, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733884001290 2024-12-11T02:26:43,820 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c89c1b0ff484294b6d8f478ae8c888a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733884001353 2024-12-11T02:26:43,820 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting af641234e9c447eda49a85763dd8d8d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733884001527 2024-12-11T02:26:43,821 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f6a18c2eb974f069a9092960c8a1b05, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884002744 2024-12-11T02:26:43,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742024_1200 (size=39749) 2024-12-11T02:26:43,832 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0009e5cd59204a0485599c3c75eeb78e 2024-12-11T02:26:43,841 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#174 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:43,841 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/370ae0ab0a2140ad883c1690b5bb70ea is 50, key is test_row_0/C:col10/1733884003368/Put/seqid=0 2024-12-11T02:26:43,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/0c03af17ebf44c0ea2c759d22cebd2a5 is 50, key is test_row_0/B:col10/1733884003698/Put/seqid=0 2024-12-11T02:26:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742025_1201 (size=12241) 2024-12-11T02:26:43,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742026_1202 (size=12151) 2024-12-11T02:26:43,865 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/370ae0ab0a2140ad883c1690b5bb70ea as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/370ae0ab0a2140ad883c1690b5bb70ea 2024-12-11T02:26:43,870 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into 370ae0ab0a2140ad883c1690b5bb70ea(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:43,870 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:43,870 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=12, startTime=1733884003658; duration=0sec 2024-12-11T02:26:43,871 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:43,871 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:26:43,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884063888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884063888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884063889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:43,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:43,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884063889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-11T02:26:44,035 INFO [Thread-802 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-11T02:26:44,037 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:44,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-11T02:26:44,039 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:44,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-11T02:26:44,044 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:44,044 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:44,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884064091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884064092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884064093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884064092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-11T02:26:44,151 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/68088123d257426faf22cf5c2e69a454 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/68088123d257426faf22cf5c2e69a454 2024-12-11T02:26:44,159 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into 68088123d257426faf22cf5c2e69a454(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
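The compactions above (stores A, C and B of region ccefedb36bdc39d0abb7cf1c7bd657fc) were selected automatically once four store files were eligible, as reported by ExploringCompactionPolicy; the "16 blocking" figure in the selection log corresponds to hbase.hstore.blockingStoreFiles. A compaction can also be requested explicitly from a client. The sketch below is minimal and assumes a reachable cluster configured via the default HBaseConfiguration; it uses the table name seen in this log and is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Queues a compaction for every region of the table; selection of the
      // actual files is still left to the server-side compaction policy.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}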
2024-12-11T02:26:44,159 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:44,159 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=12, startTime=1733884003657; duration=0sec 2024-12-11T02:26:44,159 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:44,159 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:44,195 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-11T02:26:44,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:44,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:44,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:44,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
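The ERROR above is the expected path when a table flush races with an ongoing memstore flush: the master's FlushTableProcedure (pid=43) dispatched a per-region FlushRegionCallable (pid=44), the region reported "NOT flushing ... as already flushing", and the callable fails so the result can be reported back to the master (the RemoteProcedureException handling just below), which re-dispatches pid=44; the second dispatch further down does begin a flush of 154.31 KB across the three column families. The client-side counterpart of "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" is an Admin flush request; a minimal sketch follows, assuming a reachable cluster and the table name from this log rather than the actual test code, which is not shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush all regions of the table; in this log such a
      // request shows up as the FlushTableProcedure / FlushRegionCallable pair.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}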
2024-12-11T02:26:44,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:44,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:44,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/0c03af17ebf44c0ea2c759d22cebd2a5 2024-12-11T02:26:44,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/24a0c4d1405f4659b687e29cb7f98d9b is 50, key is test_row_0/C:col10/1733884003698/Put/seqid=0 2024-12-11T02:26:44,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742027_1203 (size=12151) 2024-12-11T02:26:44,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/24a0c4d1405f4659b687e29cb7f98d9b 2024-12-11T02:26:44,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0009e5cd59204a0485599c3c75eeb78e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0009e5cd59204a0485599c3c75eeb78e 2024-12-11T02:26:44,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0009e5cd59204a0485599c3c75eeb78e, entries=200, sequenceid=135, filesize=38.8 K 2024-12-11T02:26:44,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/0c03af17ebf44c0ea2c759d22cebd2a5 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/0c03af17ebf44c0ea2c759d22cebd2a5 2024-12-11T02:26:44,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/0c03af17ebf44c0ea2c759d22cebd2a5, entries=150, sequenceid=135, filesize=11.9 K 2024-12-11T02:26:44,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/24a0c4d1405f4659b687e29cb7f98d9b as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/24a0c4d1405f4659b687e29cb7f98d9b 2024-12-11T02:26:44,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-11T02:26:44,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/24a0c4d1405f4659b687e29cb7f98d9b, entries=150, sequenceid=135, filesize=11.9 K 2024-12-11T02:26:44,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for ccefedb36bdc39d0abb7cf1c7bd657fc in 648ms, sequenceid=135, compaction requested=false 2024-12-11T02:26:44,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:44,349 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-11T02:26:44,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:44,350 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T02:26:44,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:44,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:44,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:44,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:44,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:44,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:44,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121190544224e3124147b7361d11337d281e_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884003747/Put/seqid=0 2024-12-11T02:26:44,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742028_1204 (size=12304) 2024-12-11T02:26:44,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:44,387 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121190544224e3124147b7361d11337d281e_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121190544224e3124147b7361d11337d281e_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:44,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/5a4edd3f4924427396dcd10d6115421f, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:44,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/5a4edd3f4924427396dcd10d6115421f is 175, key is test_row_0/A:col10/1733884003747/Put/seqid=0 2024-12-11T02:26:44,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:44,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:44,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884064404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884064406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884064409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884064409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742029_1205 (size=31105) 2024-12-11T02:26:44,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884064510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884064510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884064514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884064514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-11T02:26:44,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884064714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884064714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884064718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884064718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:44,819 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=164, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/5a4edd3f4924427396dcd10d6115421f 2024-12-11T02:26:44,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a95ac0358b274edba9017db90c96cf3d is 50, key is test_row_0/B:col10/1733884003747/Put/seqid=0 2024-12-11T02:26:44,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742030_1206 (size=12151) 2024-12-11T02:26:44,848 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a95ac0358b274edba9017db90c96cf3d 2024-12-11T02:26:44,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/3677b0f42c774bdb9e1c1f58454598ca is 50, key is test_row_0/C:col10/1733884003747/Put/seqid=0 2024-12-11T02:26:44,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742031_1207 (size=12151) 2024-12-11T02:26:45,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884065016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884065017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884065022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884065022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-11T02:26:45,266 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/3677b0f42c774bdb9e1c1f58454598ca 2024-12-11T02:26:45,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/5a4edd3f4924427396dcd10d6115421f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5a4edd3f4924427396dcd10d6115421f 2024-12-11T02:26:45,280 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5a4edd3f4924427396dcd10d6115421f, entries=150, sequenceid=164, filesize=30.4 K 2024-12-11T02:26:45,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a95ac0358b274edba9017db90c96cf3d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a95ac0358b274edba9017db90c96cf3d 2024-12-11T02:26:45,287 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a95ac0358b274edba9017db90c96cf3d, entries=150, sequenceid=164, filesize=11.9 K 2024-12-11T02:26:45,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/3677b0f42c774bdb9e1c1f58454598ca as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3677b0f42c774bdb9e1c1f58454598ca 2024-12-11T02:26:45,293 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3677b0f42c774bdb9e1c1f58454598ca, entries=150, sequenceid=164, filesize=11.9 K 2024-12-11T02:26:45,294 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for ccefedb36bdc39d0abb7cf1c7bd657fc in 944ms, sequenceid=164, compaction requested=true 2024-12-11T02:26:45,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:45,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:45,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-11T02:26:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-11T02:26:45,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-11T02:26:45,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2530 sec 2024-12-11T02:26:45,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.2630 sec 2024-12-11T02:26:45,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:45,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:26:45,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:45,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:45,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:45,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:45,306 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:45,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:45,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114cb2f881f14c42ca9926d8fc639a115d_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884005304/Put/seqid=0 2024-12-11T02:26:45,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742032_1208 (size=14794) 2024-12-11T02:26:45,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884065394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884065498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884065519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884065522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884065527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884065529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884065699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:45,782 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:45,787 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114cb2f881f14c42ca9926d8fc639a115d_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114cb2f881f14c42ca9926d8fc639a115d_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:45,788 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0eeaaf09e13841e290fb9e4af9676114, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:45,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0eeaaf09e13841e290fb9e4af9676114 is 175, key is test_row_0/A:col10/1733884005304/Put/seqid=0 2024-12-11T02:26:45,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742033_1209 (size=39749) 2024-12-11T02:26:45,798 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0eeaaf09e13841e290fb9e4af9676114 2024-12-11T02:26:45,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/9c40d91eba714005a16e4c9c6ec9b19e is 50, key is test_row_0/B:col10/1733884005304/Put/seqid=0 2024-12-11T02:26:45,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742034_1210 
(size=12151) 2024-12-11T02:26:46,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884066006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-11T02:26:46,146 INFO [Thread-802 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-12-11T02:26:46,147 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:46,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-11T02:26:46,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-11T02:26:46,149 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:46,150 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:46,150 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:46,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/9c40d91eba714005a16e4c9c6ec9b19e 2024-12-11T02:26:46,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/96242fcef45a4e9b8396b78ce2e9f1f2 is 50, key is test_row_0/C:col10/1733884005304/Put/seqid=0 2024-12-11T02:26:46,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742035_1211 (size=12151) 2024-12-11T02:26:46,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/96242fcef45a4e9b8396b78ce2e9f1f2 2024-12-11T02:26:46,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0eeaaf09e13841e290fb9e4af9676114 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0eeaaf09e13841e290fb9e4af9676114 2024-12-11T02:26:46,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-11T02:26:46,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0eeaaf09e13841e290fb9e4af9676114, entries=200, sequenceid=176, filesize=38.8 K 2024-12-11T02:26:46,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/9c40d91eba714005a16e4c9c6ec9b19e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c40d91eba714005a16e4c9c6ec9b19e 2024-12-11T02:26:46,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c40d91eba714005a16e4c9c6ec9b19e, entries=150, sequenceid=176, filesize=11.9 K 2024-12-11T02:26:46,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/96242fcef45a4e9b8396b78ce2e9f1f2 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/96242fcef45a4e9b8396b78ce2e9f1f2 2024-12-11T02:26:46,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/96242fcef45a4e9b8396b78ce2e9f1f2, entries=150, sequenceid=176, filesize=11.9 K 2024-12-11T02:26:46,272 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ccefedb36bdc39d0abb7cf1c7bd657fc in 965ms, sequenceid=176, compaction requested=true 2024-12-11T02:26:46,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:46,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:46,272 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:46,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:46,272 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:46,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:46,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:46,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:46,272 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:46,274 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141798 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:46,274 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:46,274 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
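Editor's note on the repeated "Over memstore limit=512.0 K" entries above: that figure is the per-region memstore blocking threshold, which HBase computes as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of a configuration that yields a 512 K ceiling follows; the concrete values (128 K x 4) are an assumption for illustration, since the actual settings used by this test are not shown in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush each region's memstore once it reaches 128 K (assumed test-sized value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block client writes (RegionTooBusyException) once the memstore reaches
    // block.multiplier * flush.size, i.e. 4 * 128 K = 512 K as seen in the log above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit (bytes): " + blockingLimit);
  }
}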
2024-12-11T02:26:46,274 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/44b29db4356a4de7a50da4e226b0eacb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0009e5cd59204a0485599c3c75eeb78e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5a4edd3f4924427396dcd10d6115421f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0eeaaf09e13841e290fb9e4af9676114] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=138.5 K 2024-12-11T02:26:46,274 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:46,274 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/44b29db4356a4de7a50da4e226b0eacb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0009e5cd59204a0485599c3c75eeb78e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5a4edd3f4924427396dcd10d6115421f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0eeaaf09e13841e290fb9e4af9676114] 2024-12-11T02:26:46,275 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48694 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:46,275 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:46,275 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
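Editor's note: the flush procedures (pid=43/45) and the compaction requests being selected above can also be driven explicitly from a client. A minimal sketch using the standard Admin API is below; the table name matches the log, but the connection setup and the choice of column family "A" are illustrative assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queues a table flush on the master, analogous to the FlushTableProcedure entries above.
      admin.flush(table);
      // Requests a compaction of store A, similar to the compaction marks added by the flusher.
      admin.compact(table, Bytes.toBytes("A"));
      // Or request a major compaction of all stores in the table.
      admin.majorCompact(table);
    }
  }
}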
2024-12-11T02:26:46,275 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/68088123d257426faf22cf5c2e69a454, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/0c03af17ebf44c0ea2c759d22cebd2a5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a95ac0358b274edba9017db90c96cf3d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c40d91eba714005a16e4c9c6ec9b19e] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=47.6 K 2024-12-11T02:26:46,275 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44b29db4356a4de7a50da4e226b0eacb, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884002744 2024-12-11T02:26:46,276 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 68088123d257426faf22cf5c2e69a454, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884002744 2024-12-11T02:26:46,276 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0009e5cd59204a0485599c3c75eeb78e, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733884003382 2024-12-11T02:26:46,276 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c03af17ebf44c0ea2c759d22cebd2a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733884003383 2024-12-11T02:26:46,277 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a4edd3f4924427396dcd10d6115421f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733884003747 2024-12-11T02:26:46,277 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a95ac0358b274edba9017db90c96cf3d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733884003747 2024-12-11T02:26:46,277 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0eeaaf09e13841e290fb9e4af9676114, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733884004401 2024-12-11T02:26:46,277 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c40d91eba714005a16e4c9c6ec9b19e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733884004401 2024-12-11T02:26:46,289 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#183 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:46,290 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/ce5af055c9a54cb9a909a5d67f6539d6 is 50, key is test_row_0/B:col10/1733884005304/Put/seqid=0 2024-12-11T02:26:46,295 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:46,300 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211df8ea1cf1f774c7fbcd2bcfd85cb8318_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:46,302 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,303 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211df8ea1cf1f774c7fbcd2bcfd85cb8318_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:46,303 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211df8ea1cf1f774c7fbcd2bcfd85cb8318_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:46,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-11T02:26:46,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742036_1212 (size=12527) 2024-12-11T02:26:46,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
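Editor's note: while the region is over its memstore limit, Mutate calls are rejected with RegionTooBusyException until a flush drains the memstore, as the surrounding entries show. A minimal client-side retry sketch follows; it assumes the exception surfaces to the caller (the HBase client normally retries it internally first), and the row, column, and backoff values are illustrative, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);            // may be rejected while the memstore is above its blocking limit
          break;                     // write accepted
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);   // give the in-flight flush time to drain the memstore
          backoffMs *= 2;            // simple exponential backoff between attempts
        }
      }
    }
  }
}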
2024-12-11T02:26:46,309 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:26:46,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:46,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:46,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:46,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:46,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:46,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:46,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742037_1213 (size=4469) 2024-12-11T02:26:46,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bc7d085477e243cbae0fc9d03d4830fa_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884005383/Put/seqid=0 2024-12-11T02:26:46,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742038_1214 (size=12304) 2024-12-11T02:26:46,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:46,332 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bc7d085477e243cbae0fc9d03d4830fa_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bc7d085477e243cbae0fc9d03d4830fa_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:46,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/cfa0ae00122e4edb980584416d9a3277, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:46,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/cfa0ae00122e4edb980584416d9a3277 is 175, key is test_row_0/A:col10/1733884005383/Put/seqid=0 2024-12-11T02:26:46,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742039_1215 (size=31105) 2024-12-11T02:26:46,344 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/cfa0ae00122e4edb980584416d9a3277 2024-12-11T02:26:46,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/050c30ede2d647189ba45c7001939022 is 50, key is test_row_0/B:col10/1733884005383/Put/seqid=0 2024-12-11T02:26:46,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742040_1216 (size=12151) 2024-12-11T02:26:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-11T02:26:46,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:46,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:46,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884066530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884066533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884066533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884066540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884066540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884066636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884066636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,707 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/ce5af055c9a54cb9a909a5d67f6539d6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ce5af055c9a54cb9a909a5d67f6539d6 2024-12-11T02:26:46,709 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#184 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:46,709 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/e016ab70b4c64ca5ba5356e0e691399e is 175, key is test_row_0/A:col10/1733884005304/Put/seqid=0 2024-12-11T02:26:46,715 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into ce5af055c9a54cb9a909a5d67f6539d6(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:46,715 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:46,715 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=12, startTime=1733884006272; duration=0sec 2024-12-11T02:26:46,715 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:46,715 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:46,715 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:26:46,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742041_1217 (size=31481) 2024-12-11T02:26:46,717 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48694 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:26:46,717 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:46,717 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:46,718 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/370ae0ab0a2140ad883c1690b5bb70ea, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/24a0c4d1405f4659b687e29cb7f98d9b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3677b0f42c774bdb9e1c1f58454598ca, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/96242fcef45a4e9b8396b78ce2e9f1f2] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=47.6 K 2024-12-11T02:26:46,718 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 370ae0ab0a2140ad883c1690b5bb70ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884002744 2024-12-11T02:26:46,719 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 24a0c4d1405f4659b687e29cb7f98d9b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733884003383 2024-12-11T02:26:46,719 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 3677b0f42c774bdb9e1c1f58454598ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733884003747 2024-12-11T02:26:46,720 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 96242fcef45a4e9b8396b78ce2e9f1f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733884004401 2024-12-11T02:26:46,723 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/e016ab70b4c64ca5ba5356e0e691399e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e016ab70b4c64ca5ba5356e0e691399e 2024-12-11T02:26:46,731 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into e016ab70b4c64ca5ba5356e0e691399e(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:46,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:46,731 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=12, startTime=1733884006272; duration=0sec 2024-12-11T02:26:46,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:46,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:46,734 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#187 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:46,735 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/7b1b933eda6a402f89624fd60e79b734 is 50, key is test_row_0/C:col10/1733884005304/Put/seqid=0 2024-12-11T02:26:46,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-11T02:26:46,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742042_1218 (size=12527) 2024-12-11T02:26:46,763 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/050c30ede2d647189ba45c7001939022 2024-12-11T02:26:46,767 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/7b1b933eda6a402f89624fd60e79b734 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7b1b933eda6a402f89624fd60e79b734 2024-12-11T02:26:46,777 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into 7b1b933eda6a402f89624fd60e79b734(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:46,777 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:46,777 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=12, startTime=1733884006272; duration=0sec 2024-12-11T02:26:46,777 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:46,777 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:26:46,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/a83d54b765ea43a382048d867ba72e55 is 50, key is test_row_0/C:col10/1733884005383/Put/seqid=0 2024-12-11T02:26:46,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742043_1219 (size=12151) 2024-12-11T02:26:46,809 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/a83d54b765ea43a382048d867ba72e55 2024-12-11T02:26:46,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/cfa0ae00122e4edb980584416d9a3277 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/cfa0ae00122e4edb980584416d9a3277 2024-12-11T02:26:46,823 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/cfa0ae00122e4edb980584416d9a3277, entries=150, sequenceid=200, filesize=30.4 K 2024-12-11T02:26:46,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/050c30ede2d647189ba45c7001939022 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/050c30ede2d647189ba45c7001939022 2024-12-11T02:26:46,832 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/050c30ede2d647189ba45c7001939022, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T02:26:46,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/a83d54b765ea43a382048d867ba72e55 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/a83d54b765ea43a382048d867ba72e55 2024-12-11T02:26:46,839 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/a83d54b765ea43a382048d867ba72e55, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T02:26:46,840 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for ccefedb36bdc39d0abb7cf1c7bd657fc in 531ms, sequenceid=200, compaction requested=false 2024-12-11T02:26:46,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:46,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
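The flush above completes (~140.89 KB written at sequenceid=200 in 531 ms) while the surrounding entries show concurrent writers being rejected with RegionTooBusyException because the region is over its 512 K memstore blocking limit. As a hedged, hypothetical illustration of the client side of those Mutate calls (row test_row_0, column A:col10), the sketch below retries a put with a simple backoff; the retry count and sleep values are invented, and in practice the HBase client also retries internally and may surface the failure wrapped in a retries-exhausted exception rather than directly.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retry a few times if the write fails while the region's memstore is over the
      // blocking limit (RegionTooBusyException, an IOException, as in the log above).
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (IOException e) {
          if (attempt >= 5) throw e;
          Thread.sleep(100L * attempt); // illustrative backoff only
        }
      }
    }
  }
}
```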
2024-12-11T02:26:46,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-11T02:26:46,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-11T02:26:46,843 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-11T02:26:46,843 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 692 msec 2024-12-11T02:26:46,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:46,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:26:46,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:46,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:46,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:46,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:46,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:46,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:46,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 697 msec 2024-12-11T02:26:46,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211518b379c63864a57aa917c90ee45f057_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884006846/Put/seqid=0 2024-12-11T02:26:46,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884066908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884066908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:46,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742044_1220 (size=14794) 2024-12-11T02:26:46,925 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:46,932 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211518b379c63864a57aa917c90ee45f057_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211518b379c63864a57aa917c90ee45f057_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:46,933 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/79cc911b803545e9931dc9a52353741f, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:46,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/79cc911b803545e9931dc9a52353741f is 175, key is test_row_0/A:col10/1733884006846/Put/seqid=0 2024-12-11T02:26:46,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742045_1221 (size=39749) 2024-12-11T02:26:47,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884067016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884067016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884067218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884067218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-11T02:26:47,254 INFO [Thread-802 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-11T02:26:47,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:47,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-11T02:26:47,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T02:26:47,259 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:47,260 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:47,260 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:47,356 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=218, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/79cc911b803545e9931dc9a52353741f 2024-12-11T02:26:47,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T02:26:47,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/33918e85164c420f9aaa8be061133850 is 50, key is test_row_0/B:col10/1733884006846/Put/seqid=0 
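The entries above show Thread-802 observing the FLUSH operation for procId 45 complete through HBaseAdmin, and the master immediately storing a new FlushTableProcedure (pid=47). A minimal sketch of how such a table flush is requested from client code follows; the class name is an assumption, but Admin.flush(TableName) is the standard HBase 2.x call, and, as the TableFuture entry above indicates, it returns once the master-side procedure finishes.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Corresponds to the master-side FlushTableProcedure seen above (pid=45, pid=47);
      // the region servers then run FlushRegionCallable for each region of the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```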
2024-12-11T02:26:47,413 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:47,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:47,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:47,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:47,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:47,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742046_1222 (size=12151) 2024-12-11T02:26:47,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/33918e85164c420f9aaa8be061133850 2024-12-11T02:26:47,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/b93015982e224f439cd6c5f6780c1ab8 is 50, key is test_row_0/C:col10/1733884006846/Put/seqid=0 2024-12-11T02:26:47,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742047_1223 (size=12151) 2024-12-11T02:26:47,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/b93015982e224f439cd6c5f6780c1ab8 2024-12-11T02:26:47,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/79cc911b803545e9931dc9a52353741f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79cc911b803545e9931dc9a52353741f 2024-12-11T02:26:47,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79cc911b803545e9931dc9a52353741f, entries=200, sequenceid=218, filesize=38.8 K 2024-12-11T02:26:47,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/33918e85164c420f9aaa8be061133850 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/33918e85164c420f9aaa8be061133850 2024-12-11T02:26:47,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/33918e85164c420f9aaa8be061133850, entries=150, sequenceid=218, filesize=11.9 K 2024-12-11T02:26:47,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/b93015982e224f439cd6c5f6780c1ab8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b93015982e224f439cd6c5f6780c1ab8 2024-12-11T02:26:47,514 INFO [MemStoreFlusher.0 
{}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b93015982e224f439cd6c5f6780c1ab8, entries=150, sequenceid=218, filesize=11.9 K 2024-12-11T02:26:47,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for ccefedb36bdc39d0abb7cf1c7bd657fc in 668ms, sequenceid=218, compaction requested=true 2024-12-11T02:26:47,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:47,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:47,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:47,516 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:47,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:47,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:47,516 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:47,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:47,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:47,517 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:47,517 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:47,517 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:47,518 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ce5af055c9a54cb9a909a5d67f6539d6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/050c30ede2d647189ba45c7001939022, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/33918e85164c420f9aaa8be061133850] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.0 K 2024-12-11T02:26:47,518 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102335 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:47,518 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:47,518 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,519 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e016ab70b4c64ca5ba5356e0e691399e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/cfa0ae00122e4edb980584416d9a3277, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79cc911b803545e9931dc9a52353741f] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=99.9 K 2024-12-11T02:26:47,519 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,519 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e016ab70b4c64ca5ba5356e0e691399e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/cfa0ae00122e4edb980584416d9a3277, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79cc911b803545e9931dc9a52353741f] 2024-12-11T02:26:47,519 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce5af055c9a54cb9a909a5d67f6539d6, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733884004401 2024-12-11T02:26:47,520 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e016ab70b4c64ca5ba5356e0e691399e, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733884004401 2024-12-11T02:26:47,520 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 050c30ede2d647189ba45c7001939022, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733884005374 2024-12-11T02:26:47,520 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting cfa0ae00122e4edb980584416d9a3277, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733884005374 2024-12-11T02:26:47,520 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33918e85164c420f9aaa8be061133850, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733884006531 2024-12-11T02:26:47,521 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 79cc911b803545e9931dc9a52353741f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733884006531 2024-12-11T02:26:47,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T02:26:47,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:47,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:47,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:47,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:47,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:47,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:47,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:47,547 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#192 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:47,548 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/b2ec6de0abca43a7a538f51e98ec3590 is 50, key is test_row_0/B:col10/1733884006846/Put/seqid=0 2024-12-11T02:26:47,555 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:47,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T02:26:47,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884067559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884067559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,567 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:47,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:47,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:47,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:47,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117139356ed0c046ab82bf2c6db025682b_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884007525/Put/seqid=0 2024-12-11T02:26:47,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:47,582 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211d57a75b6e09b4ae8b27e53f43520fbf9_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:47,585 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211d57a75b6e09b4ae8b27e53f43520fbf9_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:47,585 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d57a75b6e09b4ae8b27e53f43520fbf9_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:47,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742048_1224 (size=12629) 2024-12-11T02:26:47,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742050_1226 (size=4469) 2024-12-11T02:26:47,623 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#193 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:47,624 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/68c1e43fac1f4888a05c926d75b9b6d0 is 175, key is test_row_0/A:col10/1733884006846/Put/seqid=0 2024-12-11T02:26:47,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742049_1225 (size=17284) 2024-12-11T02:26:47,642 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:47,650 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117139356ed0c046ab82bf2c6db025682b_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117139356ed0c046ab82bf2c6db025682b_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:47,651 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/2013ba94a62f442e9cb0929e805fc2bd, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:47,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/2013ba94a62f442e9cb0929e805fc2bd is 175, key is test_row_0/A:col10/1733884007525/Put/seqid=0 2024-12-11T02:26:47,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742051_1227 (size=31583) 2024-12-11T02:26:47,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884067664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884067664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,676 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/68c1e43fac1f4888a05c926d75b9b6d0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/68c1e43fac1f4888a05c926d75b9b6d0 2024-12-11T02:26:47,683 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into 68c1e43fac1f4888a05c926d75b9b6d0(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:47,683 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:47,683 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=13, startTime=1733884007516; duration=0sec 2024-12-11T02:26:47,683 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:47,683 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:47,684 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:47,685 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:47,685 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:47,685 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,685 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7b1b933eda6a402f89624fd60e79b734, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/a83d54b765ea43a382048d867ba72e55, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b93015982e224f439cd6c5f6780c1ab8] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.0 K 2024-12-11T02:26:47,686 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b1b933eda6a402f89624fd60e79b734, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733884004401 2024-12-11T02:26:47,686 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a83d54b765ea43a382048d867ba72e55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733884005374 2024-12-11T02:26:47,687 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b93015982e224f439cd6c5f6780c1ab8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733884006531 2024-12-11T02:26:47,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 
is added to blk_1073742052_1228 (size=48389) 2024-12-11T02:26:47,693 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=241, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/2013ba94a62f442e9cb0929e805fc2bd 2024-12-11T02:26:47,717 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#196 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:47,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/49d142360f1a4aa6835ac73f08cdf017 is 50, key is test_row_0/B:col10/1733884007525/Put/seqid=0 2024-12-11T02:26:47,721 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/d2bdf91277cd4119bba6dd1ff70bb44f is 50, key is test_row_0/C:col10/1733884006846/Put/seqid=0 2024-12-11T02:26:47,721 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:47,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:47,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:47,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:47,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:47,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742053_1229 (size=12151) 2024-12-11T02:26:47,738 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/49d142360f1a4aa6835ac73f08cdf017 2024-12-11T02:26:47,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/6da11f09bac742f686756aa15d81e357 is 50, key is test_row_0/C:col10/1733884007525/Put/seqid=0 2024-12-11T02:26:47,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742055_1231 (size=12151) 2024-12-11T02:26:47,756 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/6da11f09bac742f686756aa15d81e357 2024-12-11T02:26:47,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742054_1230 (size=12629) 2024-12-11T02:26:47,766 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/d2bdf91277cd4119bba6dd1ff70bb44f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/d2bdf91277cd4119bba6dd1ff70bb44f 2024-12-11T02:26:47,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/2013ba94a62f442e9cb0929e805fc2bd as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/2013ba94a62f442e9cb0929e805fc2bd 2024-12-11T02:26:47,776 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into d2bdf91277cd4119bba6dd1ff70bb44f(size=12.3 K), total size for store is 
12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:47,776 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:47,776 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=13, startTime=1733884007516; duration=0sec 2024-12-11T02:26:47,776 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:47,776 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:26:47,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/2013ba94a62f442e9cb0929e805fc2bd, entries=250, sequenceid=241, filesize=47.3 K 2024-12-11T02:26:47,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/49d142360f1a4aa6835ac73f08cdf017 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/49d142360f1a4aa6835ac73f08cdf017 2024-12-11T02:26:47,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/49d142360f1a4aa6835ac73f08cdf017, entries=150, sequenceid=241, filesize=11.9 K 2024-12-11T02:26:47,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/6da11f09bac742f686756aa15d81e357 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6da11f09bac742f686756aa15d81e357 2024-12-11T02:26:47,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6da11f09bac742f686756aa15d81e357, entries=150, sequenceid=241, filesize=11.9 K 2024-12-11T02:26:47,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ccefedb36bdc39d0abb7cf1c7bd657fc in 270ms, sequenceid=241, compaction requested=false 2024-12-11T02:26:47,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:47,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T02:26:47,869 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:47,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:26:47,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:47,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:47,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:47,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:47,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:47,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:47,875 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:47,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:47,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:47,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:47,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:47,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:47,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121158326472aae5426e8ef4d2a65afe7d50_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884007543/Put/seqid=0 2024-12-11T02:26:47,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742056_1232 (size=14894) 2024-12-11T02:26:47,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884067912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:47,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:47,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884067913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,014 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/b2ec6de0abca43a7a538f51e98ec3590 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/b2ec6de0abca43a7a538f51e98ec3590 2024-12-11T02:26:48,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:48,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884068017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:48,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884068017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,020 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into b2ec6de0abca43a7a538f51e98ec3590(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
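The RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit (reported as 512.0 K for this test region); the HBase client retries such calls internally, which is what the RpcRetryingCallerImpl entries further down record (tries=6, retries=16). A minimal, hypothetical sketch of an application-level writer that backs off once those internal retries are exhausted is given here; it assumes the standard HBase client API and reuses the table and row names that appear in this log, and none of it is part of the test code being run.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          // The client already retries RegionTooBusyException internally
          // (the "tries=6, retries=16" lines in this log); this loop only
          // handles the case where those retries are exhausted.
          table.put(put);
          break;
        } catch (IOException e) {
          if (attempt >= 5) {
            throw e;
          }
          Thread.sleep(200L * attempt); // back off and let the flush drain the memstore
        }
      }
    }
  }
}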
2024-12-11T02:26:48,020 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:48,020 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=13, startTime=1733884007516; duration=0sec 2024-12-11T02:26:48,020 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:48,020 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:48,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:48,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:48,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
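The pid=48 entries above show the master re-dispatching its flush procedure to the region server while a flush of ccefedb36bdc39d0abb7cf1c7bd657fc is still in progress: FlushRegionCallable logs "NOT flushing ... as already flushing", the handler fails with "Unable to complete flush", and the master schedules another attempt. A minimal, hypothetical sketch of that fail-fast guard pattern (not the actual HBase source) follows, assuming a coordinator that retries on IOException.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

class FlushGuardSketch {
  private final AtomicBoolean flushing = new AtomicBoolean(false);

  /**
   * Fails fast when a flush is already running so the coordinator that
   * invoked it can retry later; this mirrors the paired
   * "NOT flushing ... as already flushing" / "Unable to complete flush"
   * messages in the log above.
   */
  void flushRegion(String regionName) throws IOException {
    if (!flushing.compareAndSet(false, true)) {
      throw new IOException("Unable to complete flush " + regionName);
    }
    try {
      // ... write the memstore snapshot out to new store files ...
    } finally {
      flushing.set(false);
    }
  }
}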
2024-12-11T02:26:48,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,182 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:48,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:48,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,183 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:48,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884068221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:48,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884068221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,308 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:48,313 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121158326472aae5426e8ef4d2a65afe7d50_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121158326472aae5426e8ef4d2a65afe7d50_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:48,315 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/78e6d3c6b54e4bd78284feca9b782f75, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:48,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/78e6d3c6b54e4bd78284feca9b782f75 is 175, key is test_row_0/A:col10/1733884007543/Put/seqid=0 2024-12-11T02:26:48,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742057_1233 (size=39849) 2024-12-11T02:26:48,336 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:48,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
as already flushing 2024-12-11T02:26:48,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,337 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T02:26:48,490 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:48,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:48,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:48,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884068524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:48,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884068524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:48,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884068541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,544 DEBUG [Thread-800 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:26:48,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:48,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884068548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,550 DEBUG [Thread-792 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:26:48,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:48,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884068555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,557 DEBUG [Thread-798 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:26:48,643 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:48,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:48,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
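[Editor's note] The client-side frames earlier in this stretch show HRegion.checkResources rejecting puts once the region's memstore passes its 512.0 K blocking limit, with RpcRetryingCallerImpl retrying on the client (tries=6 of retries=16). A minimal, hypothetical Java writer in the shape of the AcidGuaranteesTestTool$AtomicityWriter frames above could look like the sketch below; the table name, family, row key and column come from the log, while the memstore sizing and retry settings are assumptions noted in the comments, not values read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Region-server settings shown only to document the relationship behind the
    // "Over memstore limit=512.0 K" rejections: the blocking limit checked in
    // HRegion.checkResources is roughly flush size * block multiplier
    // (e.g. 128 KB * 4 = 512 KB). In a real cluster these live in the servers'
    // hbase-site.xml; the values here are assumptions, not this test's config.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    conf.setInt("hbase.client.retries.number", 16); // mirrors retries=16 in the log

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // While the region's memstore is over its blocking limit, this call is
      // retried internally (RpcRetryingCallerImpl) and each server-side attempt
      // surfaces as a RegionTooBusyException entry like the ones logged above.
      table.put(put);
    }
  }
}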
2024-12-11T02:26:48,725 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=258, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/78e6d3c6b54e4bd78284feca9b782f75 2024-12-11T02:26:48,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/c846e5fb585c4a908790c73676f77512 is 50, key is test_row_0/B:col10/1733884007543/Put/seqid=0 2024-12-11T02:26:48,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742058_1234 (size=12251) 2024-12-11T02:26:48,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/c846e5fb585c4a908790c73676f77512 2024-12-11T02:26:48,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/abaa129b0a08426a8c43861ec3d0e2ef is 50, key is test_row_0/C:col10/1733884007543/Put/seqid=0 2024-12-11T02:26:48,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742059_1235 (size=12251) 2024-12-11T02:26:48,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/abaa129b0a08426a8c43861ec3d0e2ef 2024-12-11T02:26:48,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/78e6d3c6b54e4bd78284feca9b782f75 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/78e6d3c6b54e4bd78284feca9b782f75 2024-12-11T02:26:48,797 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/78e6d3c6b54e4bd78284feca9b782f75, entries=200, sequenceid=258, filesize=38.9 K 2024-12-11T02:26:48,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:48,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:48,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:48,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:48,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
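[Editor's note] The pid=48 entries above show a master-dispatched flush procedure whose FlushRegionCallable fails with "Unable to complete flush" because the region is already flushing; the RemoteProcedureException is reported back to the master, which re-dispatches the same procedure (the repeated pid=48 executions at 02:26:48,644, 48,798 and 48,952). A hedged sketch of the client call that typically drives such a remote flush is below; the connection setup is assumed, Admin.flush(TableName) is the standard entry point.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushHelper {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this request into a flush procedure and ships it to the
      // region server hosting the region; if that region is already flushing (as
      // the "NOT flushing ... as already flushing" lines above show), the callable
      // fails and the master dispatches it again until the in-progress flush ends.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}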
2024-12-11T02:26:48,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/c846e5fb585c4a908790c73676f77512 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/c846e5fb585c4a908790c73676f77512 2024-12-11T02:26:48,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/c846e5fb585c4a908790c73676f77512, entries=150, sequenceid=258, filesize=12.0 K 2024-12-11T02:26:48,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/abaa129b0a08426a8c43861ec3d0e2ef as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/abaa129b0a08426a8c43861ec3d0e2ef 2024-12-11T02:26:48,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/abaa129b0a08426a8c43861ec3d0e2ef, entries=150, sequenceid=258, filesize=12.0 K 2024-12-11T02:26:48,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for ccefedb36bdc39d0abb7cf1c7bd657fc in 949ms, sequenceid=258, compaction requested=true 2024-12-11T02:26:48,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:48,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:48,818 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:48,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:48,818 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:48,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:48,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:48,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:48,819 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:48,819 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119821 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:48,819 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:48,819 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,820 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/68c1e43fac1f4888a05c926d75b9b6d0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/2013ba94a62f442e9cb0929e805fc2bd, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/78e6d3c6b54e4bd78284feca9b782f75] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=117.0 K 2024-12-11T02:26:48,820 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37031 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:48,820 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,820 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:48,820 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/68c1e43fac1f4888a05c926d75b9b6d0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/2013ba94a62f442e9cb0929e805fc2bd, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/78e6d3c6b54e4bd78284feca9b782f75] 2024-12-11T02:26:48,820 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:48,820 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/b2ec6de0abca43a7a538f51e98ec3590, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/49d142360f1a4aa6835ac73f08cdf017, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/c846e5fb585c4a908790c73676f77512] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.2 K 2024-12-11T02:26:48,820 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68c1e43fac1f4888a05c926d75b9b6d0, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733884006531 2024-12-11T02:26:48,821 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b2ec6de0abca43a7a538f51e98ec3590, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733884006531 2024-12-11T02:26:48,821 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2013ba94a62f442e9cb0929e805fc2bd, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733884006897 2024-12-11T02:26:48,822 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 49d142360f1a4aa6835ac73f08cdf017, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733884006897 2024-12-11T02:26:48,822 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78e6d3c6b54e4bd78284feca9b782f75, keycount=200, bloomtype=ROW, size=38.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733884007539 2024-12-11T02:26:48,822 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting c846e5fb585c4a908790c73676f77512, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733884007543 2024-12-11T02:26:48,833 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 
2024-12-11T02:26:48,838 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#202 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:48,839 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/047d2effca6645e5bbed5a34ed65075a is 50, key is test_row_0/B:col10/1733884007543/Put/seqid=0 2024-12-11T02:26:48,840 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211be4c45ba029f4c5cbf94a26d4f6963b4_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:48,842 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211be4c45ba029f4c5cbf94a26d4f6963b4_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:48,843 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211be4c45ba029f4c5cbf94a26d4f6963b4_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:48,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742060_1236 (size=12831) 2024-12-11T02:26:48,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742061_1237 (size=4469) 2024-12-11T02:26:48,852 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#201 average throughput is 1.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:48,853 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/6b8f2711584a462ba17c383db19506f1 is 175, key is test_row_0/A:col10/1733884007543/Put/seqid=0 2024-12-11T02:26:48,856 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/047d2effca6645e5bbed5a34ed65075a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/047d2effca6645e5bbed5a34ed65075a 2024-12-11T02:26:48,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742062_1238 (size=31785) 2024-12-11T02:26:48,867 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into 047d2effca6645e5bbed5a34ed65075a(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:48,867 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:48,867 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=13, startTime=1733884008818; duration=0sec 2024-12-11T02:26:48,867 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:48,867 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:48,867 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:48,868 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37031 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:48,869 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:48,869 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
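[Editor's note] The compaction entries above show ExploringCompactionPolicy selecting all three eligible HFiles per store once the flush at seqNum=258 added a third file, with PressureAwareThroughputController capping the rewrite at 50.00 MB/second. The sketch below names the configuration keys that govern this behavior; the key names are standard HBase settings, but the values are illustrative assumptions rather than what this test actually configured.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is
    // selected; with the default of 3, the third flushed HFile per store is
    // what makes stores A, B and C compactable in the log above.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Bound used by the pressure-aware compaction throughput controller; the
    // "total limit is 50.00 MB/second" lines reflect whatever bound the test
    // configured (50 MB/s here is an assumption).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}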
2024-12-11T02:26:48,869 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/d2bdf91277cd4119bba6dd1ff70bb44f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6da11f09bac742f686756aa15d81e357, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/abaa129b0a08426a8c43861ec3d0e2ef] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.2 K 2024-12-11T02:26:48,870 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d2bdf91277cd4119bba6dd1ff70bb44f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733884006531 2024-12-11T02:26:48,870 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6da11f09bac742f686756aa15d81e357, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733884006897 2024-12-11T02:26:48,871 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting abaa129b0a08426a8c43861ec3d0e2ef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733884007543 2024-12-11T02:26:48,881 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#203 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:48,882 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/83d16a36616d46ad81c894fb9af8b839 is 50, key is test_row_0/C:col10/1733884007543/Put/seqid=0 2024-12-11T02:26:48,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742063_1239 (size=12831) 2024-12-11T02:26:48,951 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:48,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T02:26:48,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:48,952 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T02:26:48,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:48,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:48,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:48,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:48,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:48,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:48,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121129fb7ebcadfe4964b1f2969b707fbedc_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884007911/Put/seqid=0 2024-12-11T02:26:48,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742064_1240 (size=12454) 2024-12-11T02:26:49,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:49,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:49,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884069048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884069051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884069152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884069153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,271 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/6b8f2711584a462ba17c383db19506f1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/6b8f2711584a462ba17c383db19506f1 2024-12-11T02:26:49,277 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into 6b8f2711584a462ba17c383db19506f1(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:49,277 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:49,277 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=13, startTime=1733884008818; duration=0sec 2024-12-11T02:26:49,277 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:49,277 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:49,302 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/83d16a36616d46ad81c894fb9af8b839 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/83d16a36616d46ad81c894fb9af8b839 2024-12-11T02:26:49,308 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into 83d16a36616d46ad81c894fb9af8b839(size=12.5 K), total size for store is 12.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:49,308 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:49,308 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=13, startTime=1733884008819; duration=0sec 2024-12-11T02:26:49,308 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:49,308 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:26:49,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884069355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884069357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T02:26:49,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:49,377 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121129fb7ebcadfe4964b1f2969b707fbedc_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121129fb7ebcadfe4964b1f2969b707fbedc_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:49,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/e98cbed2c55242389b1be95229d702af, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:49,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/e98cbed2c55242389b1be95229d702af is 175, key is test_row_0/A:col10/1733884007911/Put/seqid=0 2024-12-11T02:26:49,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742065_1241 (size=31255) 2024-12-11T02:26:49,389 INFO 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=282, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/e98cbed2c55242389b1be95229d702af 2024-12-11T02:26:49,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/4a6295b0830e4fcc8db2258b1dbb62df is 50, key is test_row_0/B:col10/1733884007911/Put/seqid=0 2024-12-11T02:26:49,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742066_1242 (size=12301) 2024-12-11T02:26:49,415 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/4a6295b0830e4fcc8db2258b1dbb62df 2024-12-11T02:26:49,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/cd8ed48b62c2426882a8d82a8a2b89ed is 50, key is test_row_0/C:col10/1733884007911/Put/seqid=0 2024-12-11T02:26:49,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742067_1243 (size=12301) 2024-12-11T02:26:49,443 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/cd8ed48b62c2426882a8d82a8a2b89ed 2024-12-11T02:26:49,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/e98cbed2c55242389b1be95229d702af as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e98cbed2c55242389b1be95229d702af 2024-12-11T02:26:49,454 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e98cbed2c55242389b1be95229d702af, entries=150, sequenceid=282, filesize=30.5 K 2024-12-11T02:26:49,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/4a6295b0830e4fcc8db2258b1dbb62df as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/4a6295b0830e4fcc8db2258b1dbb62df 2024-12-11T02:26:49,465 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/4a6295b0830e4fcc8db2258b1dbb62df, entries=150, sequenceid=282, filesize=12.0 K 2024-12-11T02:26:49,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/cd8ed48b62c2426882a8d82a8a2b89ed as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd8ed48b62c2426882a8d82a8a2b89ed 2024-12-11T02:26:49,472 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd8ed48b62c2426882a8d82a8a2b89ed, entries=150, sequenceid=282, filesize=12.0 K 2024-12-11T02:26:49,473 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for ccefedb36bdc39d0abb7cf1c7bd657fc in 521ms, sequenceid=282, compaction requested=false 2024-12-11T02:26:49,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:49,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:49,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-11T02:26:49,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-11T02:26:49,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-11T02:26:49,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2150 sec 2024-12-11T02:26:49,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 2.2210 sec 2024-12-11T02:26:49,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T02:26:49,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:49,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:49,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:49,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:49,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:49,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:49,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:49,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111134c06f4c244c6db22fcd8da1553f0e_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884009661/Put/seqid=0 2024-12-11T02:26:49,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742068_1244 (size=12454) 2024-12-11T02:26:49,691 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:49,696 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111134c06f4c244c6db22fcd8da1553f0e_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111134c06f4c244c6db22fcd8da1553f0e_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:49,697 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/7fd6d7ab78fb4cfebc3da87460e47da5, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:49,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/7fd6d7ab78fb4cfebc3da87460e47da5 is 175, key is test_row_0/A:col10/1733884009661/Put/seqid=0 2024-12-11T02:26:49,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884069698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884069698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742069_1245 (size=31255) 2024-12-11T02:26:49,707 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=299, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/7fd6d7ab78fb4cfebc3da87460e47da5 2024-12-11T02:26:49,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/58186830dfe64dba9a844d703517d50c is 50, key is test_row_0/B:col10/1733884009661/Put/seqid=0 2024-12-11T02:26:49,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742070_1246 (size=12301) 2024-12-11T02:26:49,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/58186830dfe64dba9a844d703517d50c 2024-12-11T02:26:49,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/813df9066faf41cdaf385cb3d8380b1d is 50, key is test_row_0/C:col10/1733884009661/Put/seqid=0 2024-12-11T02:26:49,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742071_1247 (size=12301) 2024-12-11T02:26:49,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/813df9066faf41cdaf385cb3d8380b1d 2024-12-11T02:26:49,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/7fd6d7ab78fb4cfebc3da87460e47da5 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/7fd6d7ab78fb4cfebc3da87460e47da5 2024-12-11T02:26:49,762 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/7fd6d7ab78fb4cfebc3da87460e47da5, entries=150, sequenceid=299, filesize=30.5 K 2024-12-11T02:26:49,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/58186830dfe64dba9a844d703517d50c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/58186830dfe64dba9a844d703517d50c 2024-12-11T02:26:49,768 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/58186830dfe64dba9a844d703517d50c, entries=150, sequenceid=299, filesize=12.0 K 2024-12-11T02:26:49,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/813df9066faf41cdaf385cb3d8380b1d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/813df9066faf41cdaf385cb3d8380b1d 2024-12-11T02:26:49,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/813df9066faf41cdaf385cb3d8380b1d, entries=150, sequenceid=299, filesize=12.0 K 2024-12-11T02:26:49,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for ccefedb36bdc39d0abb7cf1c7bd657fc in 110ms, sequenceid=299, compaction requested=true 2024-12-11T02:26:49,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:49,776 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:49,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:49,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:49,776 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-12-11T02:26:49,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:49,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:49,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:49,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:49,778 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94295 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:49,778 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:49,778 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:49,779 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/6b8f2711584a462ba17c383db19506f1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e98cbed2c55242389b1be95229d702af, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/7fd6d7ab78fb4cfebc3da87460e47da5] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=92.1 K 2024-12-11T02:26:49,779 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:49,779 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/6b8f2711584a462ba17c383db19506f1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e98cbed2c55242389b1be95229d702af, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/7fd6d7ab78fb4cfebc3da87460e47da5] 2024-12-11T02:26:49,779 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:49,779 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:49,779 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:49,779 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/047d2effca6645e5bbed5a34ed65075a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/4a6295b0830e4fcc8db2258b1dbb62df, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/58186830dfe64dba9a844d703517d50c] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.6 K 2024-12-11T02:26:49,779 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b8f2711584a462ba17c383db19506f1, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733884007543 2024-12-11T02:26:49,780 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 047d2effca6645e5bbed5a34ed65075a, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733884007543 2024-12-11T02:26:49,780 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting e98cbed2c55242389b1be95229d702af, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733884007887 2024-12-11T02:26:49,781 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a6295b0830e4fcc8db2258b1dbb62df, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733884007887 2024-12-11T02:26:49,781 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fd6d7ab78fb4cfebc3da87460e47da5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733884009035 2024-12-11T02:26:49,781 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 
58186830dfe64dba9a844d703517d50c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733884009035 2024-12-11T02:26:49,793 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#210 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:49,794 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/22b0501e59d843e3a306734bb3c395b6 is 50, key is test_row_0/B:col10/1733884009661/Put/seqid=0 2024-12-11T02:26:49,800 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:49,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:49,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:26:49,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:49,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:49,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:49,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:49,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:49,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:49,817 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211181f46840fe44842bff7a4815cb3821c_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:49,821 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211181f46840fe44842bff7a4815cb3821c_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:49,821 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211181f46840fe44842bff7a4815cb3821c_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:49,827 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742072_1248 (size=12983) 2024-12-11T02:26:49,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884069827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884069830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,836 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/22b0501e59d843e3a306734bb3c395b6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/22b0501e59d843e3a306734bb3c395b6 2024-12-11T02:26:49,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121173db793a5aa341348c0523fccf96173c_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884009806/Put/seqid=0 2024-12-11T02:26:49,873 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into 22b0501e59d843e3a306734bb3c395b6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:49,873 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:49,873 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=13, startTime=1733884009776; duration=0sec 2024-12-11T02:26:49,874 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:49,874 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:49,874 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:49,875 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:49,875 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:49,876 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:49,876 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/83d16a36616d46ad81c894fb9af8b839, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd8ed48b62c2426882a8d82a8a2b89ed, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/813df9066faf41cdaf385cb3d8380b1d] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.6 K 2024-12-11T02:26:49,876 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 83d16a36616d46ad81c894fb9af8b839, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733884007543 2024-12-11T02:26:49,877 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting cd8ed48b62c2426882a8d82a8a2b89ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733884007887 2024-12-11T02:26:49,877 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 813df9066faf41cdaf385cb3d8380b1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733884009035 2024-12-11T02:26:49,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 
is added to blk_1073742073_1249 (size=4469) 2024-12-11T02:26:49,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742074_1250 (size=12454) 2024-12-11T02:26:49,887 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:49,890 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#213 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:49,891 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/aa51a250e34c4a59ba1fe0b0e57f3162 is 50, key is test_row_0/C:col10/1733884009661/Put/seqid=0 2024-12-11T02:26:49,893 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121173db793a5aa341348c0523fccf96173c_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121173db793a5aa341348c0523fccf96173c_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:49,894 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/3fb194a68eeb46bbb69bb8fb57e931ce, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:49,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/3fb194a68eeb46bbb69bb8fb57e931ce is 175, key is test_row_0/A:col10/1733884009806/Put/seqid=0 2024-12-11T02:26:49,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742075_1251 (size=31255) 2024-12-11T02:26:49,916 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=324, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/3fb194a68eeb46bbb69bb8fb57e931ce 2024-12-11T02:26:49,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742076_1252 (size=12983) 2024-12-11T02:26:49,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/e60d75ebd2a2407196e14c70dcbda493 is 50, key is test_row_0/B:col10/1733884009806/Put/seqid=0 2024-12-11T02:26:49,930 DEBUG 
[RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/aa51a250e34c4a59ba1fe0b0e57f3162 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/aa51a250e34c4a59ba1fe0b0e57f3162 2024-12-11T02:26:49,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884069935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:49,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884069935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:49,941 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into aa51a250e34c4a59ba1fe0b0e57f3162(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:49,942 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:49,942 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=13, startTime=1733884009777; duration=0sec 2024-12-11T02:26:49,942 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:49,942 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:26:49,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742077_1253 (size=12301) 2024-12-11T02:26:49,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/e60d75ebd2a2407196e14c70dcbda493 2024-12-11T02:26:49,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/1a8d07ee2b1a425cb10f3dcb6b0a6291 is 50, key is test_row_0/C:col10/1733884009806/Put/seqid=0 2024-12-11T02:26:49,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742078_1254 (size=12301) 2024-12-11T02:26:49,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=324 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/1a8d07ee2b1a425cb10f3dcb6b0a6291 2024-12-11T02:26:49,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/3fb194a68eeb46bbb69bb8fb57e931ce as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/3fb194a68eeb46bbb69bb8fb57e931ce 2024-12-11T02:26:49,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/3fb194a68eeb46bbb69bb8fb57e931ce, entries=150, sequenceid=324, filesize=30.5 K 2024-12-11T02:26:49,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/e60d75ebd2a2407196e14c70dcbda493 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e60d75ebd2a2407196e14c70dcbda493 2024-12-11T02:26:49,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e60d75ebd2a2407196e14c70dcbda493, entries=150, sequenceid=324, filesize=12.0 K 2024-12-11T02:26:49,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/1a8d07ee2b1a425cb10f3dcb6b0a6291 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/1a8d07ee2b1a425cb10f3dcb6b0a6291 2024-12-11T02:26:49,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/1a8d07ee2b1a425cb10f3dcb6b0a6291, entries=150, sequenceid=324, filesize=12.0 K 2024-12-11T02:26:50,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ccefedb36bdc39d0abb7cf1c7bd657fc in 194ms, sequenceid=324, compaction requested=false 2024-12-11T02:26:50,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:50,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:50,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:26:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:50,145 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:50,147 INFO [master/5f57a24c5131:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-11T02:26:50,147 INFO [master/5f57a24c5131:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-11T02:26:50,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121140372384100f433f880e44160f61f509_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884009825/Put/seqid=0 2024-12-11T02:26:50,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742079_1255 (size=12454) 2024-12-11T02:26:50,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:50,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884070207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:50,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:50,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884070210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:50,283 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#211 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:50,283 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/81bba5c6add341139a729c8fef68c879 is 175, key is test_row_0/A:col10/1733884009661/Put/seqid=0 2024-12-11T02:26:50,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742080_1256 (size=31937) 2024-12-11T02:26:50,296 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/81bba5c6add341139a729c8fef68c879 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/81bba5c6add341139a729c8fef68c879 2024-12-11T02:26:50,302 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into 81bba5c6add341139a729c8fef68c879(size=31.2 K), total size for store is 61.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:50,302 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:50,302 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=13, startTime=1733884009776; duration=0sec 2024-12-11T02:26:50,302 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:50,302 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:50,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:50,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884070312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:50,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:50,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884070312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:50,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:50,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884070514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:50,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:50,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884070516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:50,562 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:50,567 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121140372384100f433f880e44160f61f509_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121140372384100f433f880e44160f61f509_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:50,569 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0a16127e247a4170baa6ed580dd58342, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:50,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0a16127e247a4170baa6ed580dd58342 is 175, key is test_row_0/A:col10/1733884009825/Put/seqid=0 2024-12-11T02:26:50,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742081_1257 (size=31255) 2024-12-11T02:26:50,580 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=339, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0a16127e247a4170baa6ed580dd58342 2024-12-11T02:26:50,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/7eae4e76d3014ff7b5e0aa3e66ca3082 is 50, key is test_row_0/B:col10/1733884009825/Put/seqid=0 2024-12-11T02:26:50,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742082_1258 
(size=12301) 2024-12-11T02:26:50,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:50,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:50,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884070818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:50,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884070818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:50,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/7eae4e76d3014ff7b5e0aa3e66ca3082 2024-12-11T02:26:51,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/2c307343d4174a9697a319cb3ac62b87 is 50, key is test_row_0/C:col10/1733884009825/Put/seqid=0 2024-12-11T02:26:51,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742083_1259 (size=12301) 2024-12-11T02:26:51,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:51,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884071320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:51,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:51,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884071322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:51,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T02:26:51,363 INFO [Thread-802 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-11T02:26:51,365 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:51,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-11T02:26:51,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-11T02:26:51,366 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:51,367 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:51,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:51,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/2c307343d4174a9697a319cb3ac62b87 2024-12-11T02:26:51,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0a16127e247a4170baa6ed580dd58342 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0a16127e247a4170baa6ed580dd58342 2024-12-11T02:26:51,423 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0a16127e247a4170baa6ed580dd58342, entries=150, sequenceid=339, filesize=30.5 K 2024-12-11T02:26:51,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/7eae4e76d3014ff7b5e0aa3e66ca3082 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7eae4e76d3014ff7b5e0aa3e66ca3082 2024-12-11T02:26:51,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7eae4e76d3014ff7b5e0aa3e66ca3082, entries=150, sequenceid=339, filesize=12.0 K 2024-12-11T02:26:51,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/2c307343d4174a9697a319cb3ac62b87 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/2c307343d4174a9697a319cb3ac62b87 2024-12-11T02:26:51,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/2c307343d4174a9697a319cb3ac62b87, entries=150, sequenceid=339, filesize=12.0 K 2024-12-11T02:26:51,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ccefedb36bdc39d0abb7cf1c7bd657fc in 1294ms, sequenceid=339, compaction requested=true 2024-12-11T02:26:51,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:51,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:51,437 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:51,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:51,437 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:51,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:51,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-12-11T02:26:51,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:51,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:51,438 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:51,439 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:51,439 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:51,439 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/81bba5c6add341139a729c8fef68c879, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/3fb194a68eeb46bbb69bb8fb57e931ce, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0a16127e247a4170baa6ed580dd58342] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=92.2 K 2024-12-11T02:26:51,439 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:51,439 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/81bba5c6add341139a729c8fef68c879, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/3fb194a68eeb46bbb69bb8fb57e931ce, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0a16127e247a4170baa6ed580dd58342] 2024-12-11T02:26:51,439 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:51,439 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:51,439 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:51,439 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/22b0501e59d843e3a306734bb3c395b6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e60d75ebd2a2407196e14c70dcbda493, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7eae4e76d3014ff7b5e0aa3e66ca3082] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.7 K 2024-12-11T02:26:51,439 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81bba5c6add341139a729c8fef68c879, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733884009035 2024-12-11T02:26:51,440 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 22b0501e59d843e3a306734bb3c395b6, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733884009035 2024-12-11T02:26:51,440 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fb194a68eeb46bbb69bb8fb57e931ce, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733884009693 2024-12-11T02:26:51,440 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e60d75ebd2a2407196e14c70dcbda493, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733884009693 2024-12-11T02:26:51,440 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a16127e247a4170baa6ed580dd58342, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1733884009825 2024-12-11T02:26:51,441 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 
7eae4e76d3014ff7b5e0aa3e66ca3082, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1733884009825 2024-12-11T02:26:51,448 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:51,449 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#219 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:51,450 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/ffc56a9dba564d7282a5ae6ad8106ded is 50, key is test_row_0/B:col10/1733884009825/Put/seqid=0 2024-12-11T02:26:51,455 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121196e83c328f044dcb9ad3e189a863311f_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:51,457 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121196e83c328f044dcb9ad3e189a863311f_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:51,457 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121196e83c328f044dcb9ad3e189a863311f_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:51,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-11T02:26:51,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742084_1260 (size=13085) 2024-12-11T02:26:51,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742085_1261 (size=4469) 2024-12-11T02:26:51,474 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#220 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:51,475 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/9e845f85817f4092be391f815e6e531d is 175, key is test_row_0/A:col10/1733884009825/Put/seqid=0 2024-12-11T02:26:51,478 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/ffc56a9dba564d7282a5ae6ad8106ded as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ffc56a9dba564d7282a5ae6ad8106ded 2024-12-11T02:26:51,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742086_1262 (size=32039) 2024-12-11T02:26:51,485 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into ffc56a9dba564d7282a5ae6ad8106ded(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:51,485 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:51,485 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=13, startTime=1733884011437; duration=0sec 2024-12-11T02:26:51,485 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:51,485 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:51,485 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:51,487 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:51,487 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:51,488 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:51,488 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/aa51a250e34c4a59ba1fe0b0e57f3162, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/1a8d07ee2b1a425cb10f3dcb6b0a6291, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/2c307343d4174a9697a319cb3ac62b87] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.7 K 2024-12-11T02:26:51,488 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting aa51a250e34c4a59ba1fe0b0e57f3162, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733884009035 2024-12-11T02:26:51,489 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a8d07ee2b1a425cb10f3dcb6b0a6291, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733884009693 2024-12-11T02:26:51,489 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c307343d4174a9697a319cb3ac62b87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1733884009825 2024-12-11T02:26:51,489 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/9e845f85817f4092be391f815e6e531d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9e845f85817f4092be391f815e6e531d 2024-12-11T02:26:51,495 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into 9e845f85817f4092be391f815e6e531d(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:51,495 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:51,495 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=13, startTime=1733884011437; duration=0sec 2024-12-11T02:26:51,495 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:51,495 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:51,499 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#221 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:51,500 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/cd7d1c10b0de4fc29879bf095aa875a1 is 50, key is test_row_0/C:col10/1733884009825/Put/seqid=0 2024-12-11T02:26:51,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742087_1263 (size=13085) 2024-12-11T02:26:51,511 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/cd7d1c10b0de4fc29879bf095aa875a1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd7d1c10b0de4fc29879bf095aa875a1 2024-12-11T02:26:51,516 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into cd7d1c10b0de4fc29879bf095aa875a1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:51,516 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc:
2024-12-11T02:26:51,516 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=13, startTime=1733884011438; duration=0sec
2024-12-11T02:26:51,517 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T02:26:51,517 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C
2024-12-11T02:26:51,521 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600
2024-12-11T02:26:51,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50
2024-12-11T02:26:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.
2024-12-11T02:26:51,522 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB
2024-12-11T02:26:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A
2024-12-11T02:26:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T02:26:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B
2024-12-11T02:26:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T02:26:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C
2024-12-11T02:26:51,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T02:26:51,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412112854bd6aec2f4e7bb84d223a28ac7e35_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884010208/Put/seqid=0
2024-12-11T02:26:51,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742088_1264 (size=12454)
2024-12-11T02:26:51,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T02:26:51,544 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412112854bd6aec2f4e7bb84d223a28ac7e35_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412112854bd6aec2f4e7bb84d223a28ac7e35_ccefedb36bdc39d0abb7cf1c7bd657fc
2024-12-11T02:26:51,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/52973391ef744c00812037e401571413, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc]
2024-12-11T02:26:51,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/52973391ef744c00812037e401571413 is 175, key is test_row_0/A:col10/1733884010208/Put/seqid=0
2024-12-11T02:26:51,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742089_1265 (size=31255)
2024-12-11T02:26:51,558 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=366, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/52973391ef744c00812037e401571413
2024-12-11T02:26:51,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/70d6979f6a2546e0a79d849939b7256b is 50, key is test_row_0/B:col10/1733884010208/Put/seqid=0
2024-12-11T02:26:51,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742090_1266 (size=12301)
2024-12-11T02:26:51,583 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/70d6979f6a2546e0a79d849939b7256b
2024-12-11T02:26:51,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/c62e166af9ec4e4c998142d44f763a5c is 50, key is test_row_0/C:col10/1733884010208/Put/seqid=0
2024-12-11T02:26:51,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742091_1267 (size=12301)
2024-12-11T02:26:51,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-11T02:26:51,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-11T02:26:52,004 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/c62e166af9ec4e4c998142d44f763a5c
2024-12-11T02:26:52,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/52973391ef744c00812037e401571413 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/52973391ef744c00812037e401571413
2024-12-11T02:26:52,014 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/52973391ef744c00812037e401571413, entries=150, sequenceid=366, filesize=30.5 K
2024-12-11T02:26:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/70d6979f6a2546e0a79d849939b7256b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/70d6979f6a2546e0a79d849939b7256b
2024-12-11T02:26:52,021 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/70d6979f6a2546e0a79d849939b7256b, entries=150, sequenceid=366, filesize=12.0 K
2024-12-11T02:26:52,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/c62e166af9ec4e4c998142d44f763a5c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/c62e166af9ec4e4c998142d44f763a5c
2024-12-11T02:26:52,026 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/c62e166af9ec4e4c998142d44f763a5c, entries=150, sequenceid=366, filesize=12.0 K
2024-12-11T02:26:52,027 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for ccefedb36bdc39d0abb7cf1c7bd657fc in 505ms, sequenceid=366, compaction requested=false
2024-12-11T02:26:52,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc:
2024-12-11T02:26:52,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.
2024-12-11T02:26:52,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50
2024-12-11T02:26:52,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=50
2024-12-11T02:26:52,031 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49
2024-12-11T02:26:52,031 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 662 msec
2024-12-11T02:26:52,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 666 msec
2024-12-11T02:26:52,082 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,086 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,089 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,092 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,095 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,101 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,104 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,107 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,110 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,112 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,116 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,119 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,123 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same StoreFileTrackerFactory(122) DEBUG entry repeats continuously across RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=40311) from 2024-12-11T02:26:52,123 through 2024-12-11T02:26:52,188, each time instantiating org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,192 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,195 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,198 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,200 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,204 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,206 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,209 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,212 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,215 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,218 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,222 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,227 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,231 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,234 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,237 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,241 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,244 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,248 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,251 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,255 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,258 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,262 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,266 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,270 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,273 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,276 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,279 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,283 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,287 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,290 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=40311) from 2024-12-11T02:26:52,290 through 2024-12-11T02:26:52,364 ...]
2024-12-11T02:26:52,364 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,367 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,372 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,378 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,383 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,386 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,390 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,394 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,397 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,401 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,404 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,407 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,410 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:52,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 
3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:26:52,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:52,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:52,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:52,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:52,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:52,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:52,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-11T02:26:52,470 INFO [Thread-802 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-11T02:26:52,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:52,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-11T02:26:52,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-11T02:26:52,474 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:52,476 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:52,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:52,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119a8d17836b4140b48e867d17ab7719dc_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884012418/Put/seqid=0 2024-12-11T02:26:52,517 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742092_1268 (size=20074) 2024-12-11T02:26:52,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:52,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884072530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:52,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884072530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:52,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40076 deadline: 1733884072557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,559 DEBUG [Thread-800 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:26:52,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:52,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40118 deadline: 1733884072562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,564 DEBUG [Thread-792 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:26:52,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-11T02:26:52,593 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:52,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40130 deadline: 1733884072591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,594 DEBUG [Thread-798 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:26:52,628 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-11T02:26:52,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:52,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:52,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:52,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:52,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:52,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884072634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884072634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-11T02:26:52,782 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-11T02:26:52,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:52,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:52,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:52,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:52,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:52,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:52,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:52,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884072837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:52,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884072838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,917 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:52,923 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119a8d17836b4140b48e867d17ab7719dc_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119a8d17836b4140b48e867d17ab7719dc_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:52,923 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/abd9e9476bf94f3eada1349c9f8c3301, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:52,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/abd9e9476bf94f3eada1349c9f8c3301 is 175, key is test_row_0/A:col10/1733884012418/Put/seqid=0 2024-12-11T02:26:52,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742093_1269 (size=57333) 2024-12-11T02:26:52,931 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=377, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/abd9e9476bf94f3eada1349c9f8c3301 2024-12-11T02:26:52,934 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:52,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-11T02:26:52,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:52,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:52,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:52,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:52,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:52,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:52,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a38101bc944746dc8608c57d9d210edb is 50, key is test_row_0/B:col10/1733884012418/Put/seqid=0 2024-12-11T02:26:52,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742094_1270 (size=12301) 2024-12-11T02:26:53,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-11T02:26:53,088 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:53,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-11T02:26:53,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:53,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:53,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:53,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:53,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:53,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:53,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:53,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884073144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:53,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:53,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884073145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:53,241 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:53,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-11T02:26:53,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:53,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:53,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:53,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:53,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:53,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a38101bc944746dc8608c57d9d210edb 2024-12-11T02:26:53,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/3effb332fb434402ba81c7928b849db9 is 50, key is test_row_0/C:col10/1733884012418/Put/seqid=0 2024-12-11T02:26:53,394 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:53,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-11T02:26:53,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:53,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:53,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:53,395 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:53,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:53,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742095_1271 (size=12301) 2024-12-11T02:26:53,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/3effb332fb434402ba81c7928b849db9 2024-12-11T02:26:53,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/abd9e9476bf94f3eada1349c9f8c3301 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/abd9e9476bf94f3eada1349c9f8c3301 2024-12-11T02:26:53,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/abd9e9476bf94f3eada1349c9f8c3301, entries=300, sequenceid=377, filesize=56.0 K 2024-12-11T02:26:53,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/a38101bc944746dc8608c57d9d210edb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a38101bc944746dc8608c57d9d210edb 2024-12-11T02:26:53,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a38101bc944746dc8608c57d9d210edb, entries=150, sequenceid=377, filesize=12.0 K 2024-12-11T02:26:53,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/3effb332fb434402ba81c7928b849db9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3effb332fb434402ba81c7928b849db9 2024-12-11T02:26:53,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3effb332fb434402ba81c7928b849db9, entries=150, sequenceid=377, filesize=12.0 K 2024-12-11T02:26:53,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ccefedb36bdc39d0abb7cf1c7bd657fc in 1031ms, sequenceid=377, compaction requested=true 2024-12-11T02:26:53,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:53,450 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:53,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:53,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:53,451 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:53,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:53,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:53,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:53,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:53,452 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 120627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:53,452 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:53,452 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:53,452 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9e845f85817f4092be391f815e6e531d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/52973391ef744c00812037e401571413, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/abd9e9476bf94f3eada1349c9f8c3301] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=117.8 K 2024-12-11T02:26:53,452 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:53,452 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9e845f85817f4092be391f815e6e531d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/52973391ef744c00812037e401571413, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/abd9e9476bf94f3eada1349c9f8c3301] 2024-12-11T02:26:53,453 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:53,453 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:53,453 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:53,453 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ffc56a9dba564d7282a5ae6ad8106ded, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/70d6979f6a2546e0a79d849939b7256b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a38101bc944746dc8608c57d9d210edb] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.8 K 2024-12-11T02:26:53,454 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e845f85817f4092be391f815e6e531d, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1733884009825 2024-12-11T02:26:53,454 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ffc56a9dba564d7282a5ae6ad8106ded, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1733884009825 2024-12-11T02:26:53,454 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52973391ef744c00812037e401571413, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733884010205 2024-12-11T02:26:53,454 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 70d6979f6a2546e0a79d849939b7256b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733884010205 2024-12-11T02:26:53,454 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting abd9e9476bf94f3eada1349c9f8c3301, keycount=300, bloomtype=ROW, size=56.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733884012376 2024-12-11T02:26:53,455 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a38101bc944746dc8608c57d9d210edb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733884012412 2024-12-11T02:26:53,470 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#228 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:53,471 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/610965d8a750475a8bea09efe85fd4f4 is 50, key is test_row_0/B:col10/1733884012418/Put/seqid=0 2024-12-11T02:26:53,477 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:53,497 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211ded08d8ee65d4a80ac6b0b09dff285fa_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:53,499 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211ded08d8ee65d4a80ac6b0b09dff285fa_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:53,499 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ded08d8ee65d4a80ac6b0b09dff285fa_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:53,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742096_1272 (size=13187) 2024-12-11T02:26:53,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742097_1273 (size=4469) 2024-12-11T02:26:53,510 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/610965d8a750475a8bea09efe85fd4f4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/610965d8a750475a8bea09efe85fd4f4 2024-12-11T02:26:53,512 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#229 average throughput is 0.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:53,513 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c0ed68c719874e90b093acd7b21d9216 is 175, key is test_row_0/A:col10/1733884012418/Put/seqid=0 2024-12-11T02:26:53,519 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into 610965d8a750475a8bea09efe85fd4f4(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:53,519 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:53,519 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=13, startTime=1733884013451; duration=0sec 2024-12-11T02:26:53,520 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:53,520 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:53,520 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:53,521 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:53,521 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:53,522 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:53,522 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd7d1c10b0de4fc29879bf095aa875a1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/c62e166af9ec4e4c998142d44f763a5c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3effb332fb434402ba81c7928b849db9] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.8 K 2024-12-11T02:26:53,522 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting cd7d1c10b0de4fc29879bf095aa875a1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1733884009825 2024-12-11T02:26:53,522 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting c62e166af9ec4e4c998142d44f763a5c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733884010205 2024-12-11T02:26:53,523 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 3effb332fb434402ba81c7928b849db9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733884012412 2024-12-11T02:26:53,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742098_1274 (size=32141) 2024-12-11T02:26:53,534 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#230 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:53,534 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/e48615a86a854ea59e2080e05aeab5c2 is 50, key is test_row_0/C:col10/1733884012418/Put/seqid=0 2024-12-11T02:26:53,537 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c0ed68c719874e90b093acd7b21d9216 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0ed68c719874e90b093acd7b21d9216 2024-12-11T02:26:53,543 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into c0ed68c719874e90b093acd7b21d9216(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:53,543 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:53,543 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=13, startTime=1733884013450; duration=0sec 2024-12-11T02:26:53,543 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:53,543 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:53,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742099_1275 (size=13187) 2024-12-11T02:26:53,548 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:53,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-11T02:26:53,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:53,549 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:26:53,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:53,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:53,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:53,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:53,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:53,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:53,557 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/e48615a86a854ea59e2080e05aeab5c2 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/e48615a86a854ea59e2080e05aeab5c2 2024-12-11T02:26:53,564 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into e48615a86a854ea59e2080e05aeab5c2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:53,564 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:53,564 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=13, startTime=1733884013451; duration=0sec 2024-12-11T02:26:53,564 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:53,564 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:26:53,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412113b577148094b4c73b140eeb0b731b527_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884012525/Put/seqid=0 2024-12-11T02:26:53,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742100_1276 (size=12454) 2024-12-11T02:26:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-11T02:26:53,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,584 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412113b577148094b4c73b140eeb0b731b527_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412113b577148094b4c73b140eeb0b731b527_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:53,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/80c6d6dce1d64d8f9d0748ab1c88c40f, store: [table=TestAcidGuarantees family=A 
region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:53,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/80c6d6dce1d64d8f9d0748ab1c88c40f is 175, key is test_row_0/A:col10/1733884012525/Put/seqid=0 2024-12-11T02:26:53,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742101_1277 (size=31255) 2024-12-11T02:26:53,594 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=404, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/80c6d6dce1d64d8f9d0748ab1c88c40f 2024-12-11T02:26:53,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/3d1874e3deea4c68a22fce74cbae9d89 is 50, key is test_row_0/B:col10/1733884012525/Put/seqid=0 2024-12-11T02:26:53,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742102_1278 (size=12301) 2024-12-11T02:26:53,617 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/3d1874e3deea4c68a22fce74cbae9d89 2024-12-11T02:26:53,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/428152d460854b0798afc5f3bd5b8d85 is 50, key is test_row_0/C:col10/1733884012525/Put/seqid=0 2024-12-11T02:26:53,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742103_1279 (size=12301) 2024-12-11T02:26:53,636 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/428152d460854b0798afc5f3bd5b8d85 2024-12-11T02:26:53,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/80c6d6dce1d64d8f9d0748ab1c88c40f as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/80c6d6dce1d64d8f9d0748ab1c88c40f 2024-12-11T02:26:53,646 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/80c6d6dce1d64d8f9d0748ab1c88c40f, entries=150, sequenceid=404, filesize=30.5 K 2024-12-11T02:26:53,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/3d1874e3deea4c68a22fce74cbae9d89 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/3d1874e3deea4c68a22fce74cbae9d89 2024-12-11T02:26:53,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,652 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/3d1874e3deea4c68a22fce74cbae9d89, entries=150, sequenceid=404, filesize=12.0 K 2024-12-11T02:26:53,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/428152d460854b0798afc5f3bd5b8d85 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/428152d460854b0798afc5f3bd5b8d85 2024-12-11T02:26:53,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
2024-12-11T02:26:53,659 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/428152d460854b0798afc5f3bd5b8d85, entries=150, sequenceid=404, filesize=12.0 K
2024-12-11T02:26:53,660 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for ccefedb36bdc39d0abb7cf1c7bd657fc in 112ms, sequenceid=404, compaction requested=false
2024-12-11T02:26:53,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc:
2024-12-11T02:26:53,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.
2024-12-11T02:26:53,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52
2024-12-11T02:26:53,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=52
2024-12-11T02:26:53,664 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51
2024-12-11T02:26:53,664 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1850 sec
2024-12-11T02:26:53,666 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.1920 sec
2024-12-11T02:26:53,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc
2024-12-11T02:26:53,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-11T02:26:53,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A
2024-12-11T02:26:53,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T02:26:53,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B
2024-12-11T02:26:53,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T02:26:53,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C
2024-12-11T02:26:53,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T02:26:53,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e1f9b95b9b3e4a6b8991d4c65ae3abe4_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884013697/Put/seqid=0
2024-12-11T02:26:53,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742104_1280 (size=25158)
2024-12-11T02:26:53,725 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e1f9b95b9b3e4a6b8991d4c65ae3abe4_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e1f9b95b9b3e4a6b8991d4c65ae3abe4_ccefedb36bdc39d0abb7cf1c7bd657fc
2024-12-11T02:26:53,726 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0cc59491d3ee495bb503dd485d708994, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc]
2024-12-11T02:26:53,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0cc59491d3ee495bb503dd485d708994 is 175, key is test_row_0/A:col10/1733884013697/Put/seqid=0
2024-12-11T02:26:53,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742105_1281 (size=74795)
2024-12-11T02:26:53,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:53,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:53,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884073769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:53,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:53,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884073776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:53,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:53,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884073877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:53,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:53,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884073878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:54,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:54,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884074080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:54,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:54,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884074082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:54,132 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=416, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0cc59491d3ee495bb503dd485d708994 2024-12-11T02:26:54,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/f0876d7e0dec45468a912efb2f3df730 is 50, key is test_row_0/B:col10/1733884013697/Put/seqid=0 2024-12-11T02:26:54,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742106_1282 (size=12301) 2024-12-11T02:26:54,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/f0876d7e0dec45468a912efb2f3df730 2024-12-11T02:26:54,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/336d70266a8243a6bb94365d80f05441 is 50, key is test_row_0/C:col10/1733884013697/Put/seqid=0 2024-12-11T02:26:54,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742107_1283 (size=12301) 2024-12-11T02:26:54,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:54,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884074384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:54,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:54,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884074386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:54,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-11T02:26:54,580 INFO [Thread-802 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-11T02:26:54,581 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:54,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-11T02:26:54,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T02:26:54,583 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:54,584 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:54,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:54,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/336d70266a8243a6bb94365d80f05441 2024-12-11T02:26:54,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/0cc59491d3ee495bb503dd485d708994 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0cc59491d3ee495bb503dd485d708994 2024-12-11T02:26:54,604 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0cc59491d3ee495bb503dd485d708994, entries=400, sequenceid=416, filesize=73.0 K 2024-12-11T02:26:54,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/f0876d7e0dec45468a912efb2f3df730 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f0876d7e0dec45468a912efb2f3df730 2024-12-11T02:26:54,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f0876d7e0dec45468a912efb2f3df730, entries=150, sequenceid=416, filesize=12.0 K 2024-12-11T02:26:54,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/336d70266a8243a6bb94365d80f05441 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/336d70266a8243a6bb94365d80f05441 2024-12-11T02:26:54,615 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/336d70266a8243a6bb94365d80f05441, entries=150, sequenceid=416, filesize=12.0 K 2024-12-11T02:26:54,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for ccefedb36bdc39d0abb7cf1c7bd657fc in 918ms, sequenceid=416, compaction requested=true 2024-12-11T02:26:54,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:54,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:54,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:54,616 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:54,616 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:54,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:54,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-12-11T02:26:54,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:54,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:54,617 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:54,617 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:54,618 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:54,618 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/610965d8a750475a8bea09efe85fd4f4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/3d1874e3deea4c68a22fce74cbae9d89, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f0876d7e0dec45468a912efb2f3df730] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.9 K 2024-12-11T02:26:54,618 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 138191 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:54,618 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 610965d8a750475a8bea09efe85fd4f4, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733884012412 2024-12-11T02:26:54,618 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:54,618 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:54,618 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0ed68c719874e90b093acd7b21d9216, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/80c6d6dce1d64d8f9d0748ab1c88c40f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0cc59491d3ee495bb503dd485d708994] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=135.0 K 2024-12-11T02:26:54,618 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:54,619 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0ed68c719874e90b093acd7b21d9216, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/80c6d6dce1d64d8f9d0748ab1c88c40f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0cc59491d3ee495bb503dd485d708994] 2024-12-11T02:26:54,619 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d1874e3deea4c68a22fce74cbae9d89, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1733884012489 2024-12-11T02:26:54,619 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0ed68c719874e90b093acd7b21d9216, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733884012412 2024-12-11T02:26:54,620 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f0876d7e0dec45468a912efb2f3df730, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1733884013684 2024-12-11T02:26:54,620 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80c6d6dce1d64d8f9d0748ab1c88c40f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1733884012489 2024-12-11T02:26:54,621 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0cc59491d3ee495bb503dd485d708994, keycount=400, bloomtype=ROW, size=73.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1733884013683 2024-12-11T02:26:54,628 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum 
MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:54,629 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:54,629 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/eee2f4677637463da8b46af5c588a5c6 is 50, key is test_row_0/B:col10/1733884013697/Put/seqid=0 2024-12-11T02:26:54,629 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121183e701fea7444551a2e6dc4af8bb2448_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:54,633 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121183e701fea7444551a2e6dc4af8bb2448_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:54,633 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121183e701fea7444551a2e6dc4af8bb2448_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:54,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742108_1284 (size=13289) 2024-12-11T02:26:54,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742109_1285 (size=4469) 2024-12-11T02:26:54,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T02:26:54,736 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:54,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-11T02:26:54,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:54,737 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T02:26:54,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:54,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:54,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:54,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:54,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:54,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:54,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e4eb85e587964737ae3fb11bd3b215d9_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884013767/Put/seqid=0 2024-12-11T02:26:54,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742110_1286 (size=12454) 2024-12-11T02:26:54,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:54,787 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e4eb85e587964737ae3fb11bd3b215d9_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e4eb85e587964737ae3fb11bd3b215d9_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:54,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/8f1ddd8e724f49c7bf271460c096a34a, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:54,794 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/8f1ddd8e724f49c7bf271460c096a34a is 175, key is test_row_0/A:col10/1733884013767/Put/seqid=0 2024-12-11T02:26:54,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742111_1287 (size=31255) 2024-12-11T02:26:54,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T02:26:54,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:54,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:54,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:54,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:54,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884074912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:54,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884074912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:55,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:55,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884075017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:55,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:55,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884075017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:55,052 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/eee2f4677637463da8b46af5c588a5c6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/eee2f4677637463da8b46af5c588a5c6 2024-12-11T02:26:55,058 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into eee2f4677637463da8b46af5c588a5c6(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:55,058 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:55,058 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=13, startTime=1733884014616; duration=0sec 2024-12-11T02:26:55,058 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:55,058 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:55,058 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:55,059 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:55,064 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:55,064 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:55,064 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/e48615a86a854ea59e2080e05aeab5c2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/428152d460854b0798afc5f3bd5b8d85, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/336d70266a8243a6bb94365d80f05441] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=36.9 K 2024-12-11T02:26:55,065 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e48615a86a854ea59e2080e05aeab5c2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1733884012412 2024-12-11T02:26:55,065 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 428152d460854b0798afc5f3bd5b8d85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1733884012489 2024-12-11T02:26:55,065 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 336d70266a8243a6bb94365d80f05441, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1733884013684 2024-12-11T02:26:55,068 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#238 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:55,069 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/1d9ba8c5d6c24860aabe3c5f9a37faad is 175, key is test_row_0/A:col10/1733884013697/Put/seqid=0 2024-12-11T02:26:55,089 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#240 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:55,090 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/95f048489012453f8412d7b20437201a is 50, key is test_row_0/C:col10/1733884013697/Put/seqid=0 2024-12-11T02:26:55,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742112_1288 (size=32243) 2024-12-11T02:26:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742113_1289 (size=13289) 2024-12-11T02:26:55,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T02:26:55,212 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=442, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/8f1ddd8e724f49c7bf271460c096a34a 2024-12-11T02:26:55,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:55,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884075221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:55,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:55,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884075222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:55,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/915349b008344a72ab19c8e4f9aa1a1e is 50, key is test_row_0/B:col10/1733884013767/Put/seqid=0 2024-12-11T02:26:55,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742114_1290 (size=12301) 2024-12-11T02:26:55,253 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/915349b008344a72ab19c8e4f9aa1a1e 2024-12-11T02:26:55,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/54dc2e05747e45cda381b4ffd2ac6788 is 50, key is test_row_0/C:col10/1733884013767/Put/seqid=0 2024-12-11T02:26:55,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742115_1291 (size=12301) 2024-12-11T02:26:55,501 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/1d9ba8c5d6c24860aabe3c5f9a37faad as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1d9ba8c5d6c24860aabe3c5f9a37faad 2024-12-11T02:26:55,508 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into 1d9ba8c5d6c24860aabe3c5f9a37faad(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:55,508 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:55,508 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=13, startTime=1733884014616; duration=0sec 2024-12-11T02:26:55,508 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:55,509 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:55,515 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/95f048489012453f8412d7b20437201a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/95f048489012453f8412d7b20437201a 2024-12-11T02:26:55,521 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into 95f048489012453f8412d7b20437201a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:26:55,521 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:55,522 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=13, startTime=1733884014616; duration=0sec 2024-12-11T02:26:55,522 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:55,522 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:26:55,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:55,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884075524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:55,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:55,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884075525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:55,682 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/54dc2e05747e45cda381b4ffd2ac6788 2024-12-11T02:26:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T02:26:55,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/8f1ddd8e724f49c7bf271460c096a34a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8f1ddd8e724f49c7bf271460c096a34a 2024-12-11T02:26:55,696 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8f1ddd8e724f49c7bf271460c096a34a, entries=150, sequenceid=442, filesize=30.5 K 2024-12-11T02:26:55,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/915349b008344a72ab19c8e4f9aa1a1e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/915349b008344a72ab19c8e4f9aa1a1e 2024-12-11T02:26:55,701 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/915349b008344a72ab19c8e4f9aa1a1e, entries=150, sequenceid=442, filesize=12.0 K 2024-12-11T02:26:55,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/54dc2e05747e45cda381b4ffd2ac6788 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/54dc2e05747e45cda381b4ffd2ac6788 2024-12-11T02:26:55,708 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/54dc2e05747e45cda381b4ffd2ac6788, entries=150, sequenceid=442, filesize=12.0 K 2024-12-11T02:26:55,709 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for ccefedb36bdc39d0abb7cf1c7bd657fc in 972ms, sequenceid=442, compaction requested=false 2024-12-11T02:26:55,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:55,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:55,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-11T02:26:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-11T02:26:55,712 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-11T02:26:55,712 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1260 sec 2024-12-11T02:26:55,714 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.1320 sec 2024-12-11T02:26:56,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:56,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:26:56,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:56,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:56,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:56,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:56,031 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:56,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:56,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111839bc0019e64a53be45d1c334eed219_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884014904/Put/seqid=0 2024-12-11T02:26:56,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742116_1292 (size=12454) 2024-12-11T02:26:56,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884076090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:56,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884076091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:56,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884076194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:56,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884076195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:56,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:56,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884076399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884076399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,461 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:56,465 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111839bc0019e64a53be45d1c334eed219_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111839bc0019e64a53be45d1c334eed219_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:56,467 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/1ec4221388f548eb833a0ff351e5f65a, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:56,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/1ec4221388f548eb833a0ff351e5f65a is 175, key is test_row_0/A:col10/1733884014904/Put/seqid=0 2024-12-11T02:26:56,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742117_1293 (size=31255) 2024-12-11T02:26:56,687 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T02:26:56,687 INFO [Thread-802 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-11T02:26:56,689 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:26:56,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-11T02:26:56,690 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:26:56,691 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:26:56,691 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:26:56,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T02:26:56,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:56,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884076702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:56,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884076703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T02:26:56,843 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T02:26:56,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:56,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:56,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:56,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:26:56,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:56,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:56,874 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=457, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/1ec4221388f548eb833a0ff351e5f65a 2024-12-11T02:26:56,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/f49a363af7634caea0d31c5a5eb38c0a is 50, key is test_row_0/B:col10/1733884014904/Put/seqid=0 2024-12-11T02:26:56,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742118_1294 (size=12301) 2024-12-11T02:26:56,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:56,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T02:26:56,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:56,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:56,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T02:26:56,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:56,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:56,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:56,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:57,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T02:26:57,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:57,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:57,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884077207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:57,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:57,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884077207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:57,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/f49a363af7634caea0d31c5a5eb38c0a 2024-12-11T02:26:57,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T02:26:57,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/92d9ee144338465991babdb59dce0252 is 50, key is test_row_0/C:col10/1733884014904/Put/seqid=0 2024-12-11T02:26:57,303 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:57,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T02:26:57,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:57,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742119_1295 (size=12301) 2024-12-11T02:26:57,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,456 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:57,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T02:26:57,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:57,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,609 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:57,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T02:26:57,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:57,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:26:57,705 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/92d9ee144338465991babdb59dce0252 2024-12-11T02:26:57,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/1ec4221388f548eb833a0ff351e5f65a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1ec4221388f548eb833a0ff351e5f65a 2024-12-11T02:26:57,715 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1ec4221388f548eb833a0ff351e5f65a, entries=150, sequenceid=457, filesize=30.5 K 2024-12-11T02:26:57,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/f49a363af7634caea0d31c5a5eb38c0a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f49a363af7634caea0d31c5a5eb38c0a 2024-12-11T02:26:57,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f49a363af7634caea0d31c5a5eb38c0a, entries=150, 
sequenceid=457, filesize=12.0 K 2024-12-11T02:26:57,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/92d9ee144338465991babdb59dce0252 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/92d9ee144338465991babdb59dce0252 2024-12-11T02:26:57,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/92d9ee144338465991babdb59dce0252, entries=150, sequenceid=457, filesize=12.0 K 2024-12-11T02:26:57,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ccefedb36bdc39d0abb7cf1c7bd657fc in 1697ms, sequenceid=457, compaction requested=true 2024-12-11T02:26:57,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:57,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:26:57,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:57,728 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:57,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:26:57,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:57,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:26:57,728 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:57,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:57,729 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94753 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:57,729 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:26:57,729 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in 
TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,729 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:57,729 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1d9ba8c5d6c24860aabe3c5f9a37faad, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8f1ddd8e724f49c7bf271460c096a34a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1ec4221388f548eb833a0ff351e5f65a] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=92.5 K 2024-12-11T02:26:57,729 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,729 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:26:57,729 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1d9ba8c5d6c24860aabe3c5f9a37faad, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8f1ddd8e724f49c7bf271460c096a34a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1ec4221388f548eb833a0ff351e5f65a] 2024-12-11T02:26:57,729 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:57,730 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/eee2f4677637463da8b46af5c588a5c6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/915349b008344a72ab19c8e4f9aa1a1e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f49a363af7634caea0d31c5a5eb38c0a] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=37.0 K 2024-12-11T02:26:57,730 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d9ba8c5d6c24860aabe3c5f9a37faad, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1733884013684 2024-12-11T02:26:57,730 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting eee2f4677637463da8b46af5c588a5c6, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1733884013684 2024-12-11T02:26:57,730 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f1ddd8e724f49c7bf271460c096a34a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=442, earliestPutTs=1733884013761 2024-12-11T02:26:57,730 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 915349b008344a72ab19c8e4f9aa1a1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=442, earliestPutTs=1733884013761 2024-12-11T02:26:57,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ec4221388f548eb833a0ff351e5f65a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733884014904 2024-12-11T02:26:57,731 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f49a363af7634caea0d31c5a5eb38c0a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733884014904 2024-12-11T02:26:57,742 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:57,743 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/d120e810bb57470182ebc3f403c0564c is 50, key is test_row_0/B:col10/1733884014904/Put/seqid=0 2024-12-11T02:26:57,762 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:26:57,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T02:26:57,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:57,763 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:26:57,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:57,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:57,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:57,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:57,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:57,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:57,768 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:57,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742120_1296 (size=13391) 2024-12-11T02:26:57,773 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211642377908c074bb898627c91a05473ad_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:57,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121176291f0cc14245bdbbfddd6779be4cd5_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884016090/Put/seqid=0 2024-12-11T02:26:57,775 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211642377908c074bb898627c91a05473ad_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:57,776 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211642377908c074bb898627c91a05473ad_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:57,777 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/d120e810bb57470182ebc3f403c0564c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/d120e810bb57470182ebc3f403c0564c 2024-12-11T02:26:57,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742121_1297 (size=12454) 2024-12-11T02:26:57,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:57,784 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into d120e810bb57470182ebc3f403c0564c(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:57,784 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:57,784 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=13, startTime=1733884017728; duration=0sec 2024-12-11T02:26:57,784 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:26:57,784 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:26:57,784 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:26:57,786 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121176291f0cc14245bdbbfddd6779be4cd5_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121176291f0cc14245bdbbfddd6779be4cd5_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:57,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742122_1298 (size=4469) 2024-12-11T02:26:57,787 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:26:57,787 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:26:57,787 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:26:57,787 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/95f048489012453f8412d7b20437201a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/54dc2e05747e45cda381b4ffd2ac6788, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/92d9ee144338465991babdb59dce0252] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=37.0 K 2024-12-11T02:26:57,788 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#247 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:57,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c4a8a708495a4e1c939077087fd59f8d, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:57,789 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 95f048489012453f8412d7b20437201a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1733884013684 2024-12-11T02:26:57,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c4a8a708495a4e1c939077087fd59f8d is 175, key is test_row_0/A:col10/1733884016090/Put/seqid=0 2024-12-11T02:26:57,789 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/9899eaaba7e74e8abf99549b56eadc5d is 175, key is test_row_0/A:col10/1733884014904/Put/seqid=0 2024-12-11T02:26:57,789 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 54dc2e05747e45cda381b4ffd2ac6788, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=442, earliestPutTs=1733884013761 2024-12-11T02:26:57,791 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 92d9ee144338465991babdb59dce0252, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733884014904 2024-12-11T02:26:57,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742123_1299 (size=31255) 2024-12-11T02:26:57,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=55 2024-12-11T02:26:57,809 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#249 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:26:57,809 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/10992c4bf4c7483abb6a28ccc51f4b8e is 50, key is test_row_0/C:col10/1733884014904/Put/seqid=0 2024-12-11T02:26:57,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742124_1300 (size=32345) 2024-12-11T02:26:57,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742125_1301 (size=13391) 2024-12-11T02:26:57,836 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/10992c4bf4c7483abb6a28ccc51f4b8e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/10992c4bf4c7483abb6a28ccc51f4b8e 2024-12-11T02:26:57,843 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into 10992c4bf4c7483abb6a28ccc51f4b8e(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:57,843 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:57,843 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=13, startTime=1733884017728; duration=0sec 2024-12-11T02:26:57,843 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:57,845 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:26:58,199 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=481, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c4a8a708495a4e1c939077087fd59f8d 2024-12-11T02:26:58,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/9c049c2724bd49ce96d5738c74f2163f is 50, key is test_row_0/B:col10/1733884016090/Put/seqid=0 2024-12-11T02:26:58,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. as already flushing 2024-12-11T02:26:58,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:58,219 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/9899eaaba7e74e8abf99549b56eadc5d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9899eaaba7e74e8abf99549b56eadc5d 2024-12-11T02:26:58,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742126_1302 (size=12301) 2024-12-11T02:26:58,224 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into 9899eaaba7e74e8abf99549b56eadc5d(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:26:58,224 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:58,224 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=13, startTime=1733884017728; duration=0sec 2024-12-11T02:26:58,225 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:26:58,225 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:26:58,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:58,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884078229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:58,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:58,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884078232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:58,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:58,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884078333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:58,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:58,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884078334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:58,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:58,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884078534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:58,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884078537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:58,618 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/9c049c2724bd49ce96d5738c74f2163f 2024-12-11T02:26:58,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/26b16e8787cf4e2e8a8b392929401c1b is 50, key is test_row_0/C:col10/1733884016090/Put/seqid=0 2024-12-11T02:26:58,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742127_1303 (size=12301) 2024-12-11T02:26:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T02:26:58,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:58,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884078839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:58,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:58,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884078841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:59,032 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/26b16e8787cf4e2e8a8b392929401c1b 2024-12-11T02:26:59,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/c4a8a708495a4e1c939077087fd59f8d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c4a8a708495a4e1c939077087fd59f8d 2024-12-11T02:26:59,040 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c4a8a708495a4e1c939077087fd59f8d, entries=150, sequenceid=481, filesize=30.5 K 2024-12-11T02:26:59,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/9c049c2724bd49ce96d5738c74f2163f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c049c2724bd49ce96d5738c74f2163f 2024-12-11T02:26:59,045 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c049c2724bd49ce96d5738c74f2163f, entries=150, sequenceid=481, filesize=12.0 K 2024-12-11T02:26:59,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/26b16e8787cf4e2e8a8b392929401c1b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/26b16e8787cf4e2e8a8b392929401c1b 2024-12-11T02:26:59,049 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/26b16e8787cf4e2e8a8b392929401c1b, entries=150, sequenceid=481, filesize=12.0 K 2024-12-11T02:26:59,050 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ccefedb36bdc39d0abb7cf1c7bd657fc in 1287ms, sequenceid=481, compaction requested=false 2024-12-11T02:26:59,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:26:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:26:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-11T02:26:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-11T02:26:59,055 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-11T02:26:59,055 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3610 sec 2024-12-11T02:26:59,057 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.3660 sec 2024-12-11T02:26:59,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:59,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:26:59,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:26:59,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:59,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:26:59,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:59,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:26:59,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:26:59,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121135836ee0eaf6457b9fde3240946d48a0_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884019347/Put/seqid=0 2024-12-11T02:26:59,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742128_1304 (size=17534) 2024-12-11T02:26:59,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:59,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884079398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:59,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:59,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884079399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:59,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:59,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884079503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:59,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:59,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884079503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:59,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884079706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:59,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:26:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884079707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:26:59,780 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:26:59,785 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121135836ee0eaf6457b9fde3240946d48a0_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121135836ee0eaf6457b9fde3240946d48a0_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:26:59,786 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/79855b0cfd0a40468835c3341e44d048, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:26:59,786 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/79855b0cfd0a40468835c3341e44d048 is 175, key is test_row_0/A:col10/1733884019347/Put/seqid=0 2024-12-11T02:26:59,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742129_1305 (size=48639) 2024-12-11T02:26:59,919 DEBUG [Thread-805 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0341384e to 
127.0.0.1:63149 2024-12-11T02:26:59,919 DEBUG [Thread-809 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c1ec7ee to 127.0.0.1:63149 2024-12-11T02:26:59,919 DEBUG [Thread-805 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:59,919 DEBUG [Thread-809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:59,920 DEBUG [Thread-803 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14c16cd4 to 127.0.0.1:63149 2024-12-11T02:26:59,920 DEBUG [Thread-803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:26:59,922 DEBUG [Thread-807 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b120d9 to 127.0.0.1:63149 2024-12-11T02:26:59,922 DEBUG [Thread-807 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:00,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:00,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884080008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:00,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:00,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884080012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:00,191 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=498, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/79855b0cfd0a40468835c3341e44d048 2024-12-11T02:27:00,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/e9e7889f9e49456caf8ecaed2f4a0b92 is 50, key is test_row_0/B:col10/1733884019347/Put/seqid=0 2024-12-11T02:27:00,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742130_1306 (size=12301) 2024-12-11T02:27:00,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40072 deadline: 1733884080512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:00,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:00,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40102 deadline: 1733884080515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:00,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/e9e7889f9e49456caf8ecaed2f4a0b92 2024-12-11T02:27:00,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/6a87092a574b43579d56ed0e39a5cc25 is 50, key is test_row_0/C:col10/1733884019347/Put/seqid=0 2024-12-11T02:27:00,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742131_1307 (size=12301) 2024-12-11T02:27:00,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T02:27:00,803 INFO [Thread-802 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-11T02:27:01,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/6a87092a574b43579d56ed0e39a5cc25 2024-12-11T02:27:01,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/79855b0cfd0a40468835c3341e44d048 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79855b0cfd0a40468835c3341e44d048 2024-12-11T02:27:01,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79855b0cfd0a40468835c3341e44d048, entries=250, sequenceid=498, filesize=47.5 K 2024-12-11T02:27:01,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/e9e7889f9e49456caf8ecaed2f4a0b92 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e9e7889f9e49456caf8ecaed2f4a0b92 2024-12-11T02:27:01,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e9e7889f9e49456caf8ecaed2f4a0b92, entries=150, sequenceid=498, filesize=12.0 K 2024-12-11T02:27:01,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/6a87092a574b43579d56ed0e39a5cc25 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6a87092a574b43579d56ed0e39a5cc25 2024-12-11T02:27:01,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6a87092a574b43579d56ed0e39a5cc25, entries=150, sequenceid=498, filesize=12.0 K 2024-12-11T02:27:01,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ccefedb36bdc39d0abb7cf1c7bd657fc in 1663ms, sequenceid=498, compaction requested=true 2024-12-11T02:27:01,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:27:01,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:01,029 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:01,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:01,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:01,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:01,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ccefedb36bdc39d0abb7cf1c7bd657fc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:01,029 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:01,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:01,030 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112239 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:01,030 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:01,030 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/A is initiating minor compaction (all files) 2024-12-11T02:27:01,031 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/B is initiating minor compaction (all files) 2024-12-11T02:27:01,031 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/A in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:27:01,031 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/B in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:27:01,031 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9899eaaba7e74e8abf99549b56eadc5d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c4a8a708495a4e1c939077087fd59f8d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79855b0cfd0a40468835c3341e44d048] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=109.6 K 2024-12-11T02:27:01,031 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/d120e810bb57470182ebc3f403c0564c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c049c2724bd49ce96d5738c74f2163f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e9e7889f9e49456caf8ecaed2f4a0b92] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=37.1 K 2024-12-11T02:27:01,031 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 
2024-12-11T02:27:01,031 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9899eaaba7e74e8abf99549b56eadc5d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c4a8a708495a4e1c939077087fd59f8d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79855b0cfd0a40468835c3341e44d048] 2024-12-11T02:27:01,031 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d120e810bb57470182ebc3f403c0564c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733884014904 2024-12-11T02:27:01,031 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c049c2724bd49ce96d5738c74f2163f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1733884016078 2024-12-11T02:27:01,032 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e9e7889f9e49456caf8ecaed2f4a0b92, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=498, earliestPutTs=1733884018231 2024-12-11T02:27:01,032 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9899eaaba7e74e8abf99549b56eadc5d, keycount=150, bloomtype=ROW, size=31.6 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733884014904 2024-12-11T02:27:01,032 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4a8a708495a4e1c939077087fd59f8d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1733884016078 2024-12-11T02:27:01,033 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79855b0cfd0a40468835c3341e44d048, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=498, earliestPutTs=1733884018227 2024-12-11T02:27:01,043 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#B#compaction#255 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:01,044 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/22c34cdd21d04bde8dc92edc65df309e is 50, key is test_row_0/B:col10/1733884019347/Put/seqid=0 2024-12-11T02:27:01,048 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:27:01,051 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211fc67e849078d4aadac5ae91d0cc0e4a1_ccefedb36bdc39d0abb7cf1c7bd657fc store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:27:01,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742132_1308 (size=13493) 2024-12-11T02:27:01,083 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211fc67e849078d4aadac5ae91d0cc0e4a1_ccefedb36bdc39d0abb7cf1c7bd657fc, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:27:01,083 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211fc67e849078d4aadac5ae91d0cc0e4a1_ccefedb36bdc39d0abb7cf1c7bd657fc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:27:01,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742133_1309 (size=4469) 2024-12-11T02:27:01,458 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/22c34cdd21d04bde8dc92edc65df309e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/22c34cdd21d04bde8dc92edc65df309e 2024-12-11T02:27:01,462 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/B of ccefedb36bdc39d0abb7cf1c7bd657fc into 22c34cdd21d04bde8dc92edc65df309e(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:01,462 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:27:01,462 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/B, priority=13, startTime=1733884021029; duration=0sec 2024-12-11T02:27:01,462 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:01,462 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:B 2024-12-11T02:27:01,462 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:01,463 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:01,463 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ccefedb36bdc39d0abb7cf1c7bd657fc/C is initiating minor compaction (all files) 2024-12-11T02:27:01,463 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ccefedb36bdc39d0abb7cf1c7bd657fc/C in TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:27:01,463 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/10992c4bf4c7483abb6a28ccc51f4b8e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/26b16e8787cf4e2e8a8b392929401c1b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6a87092a574b43579d56ed0e39a5cc25] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp, totalSize=37.1 K 2024-12-11T02:27:01,463 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 10992c4bf4c7483abb6a28ccc51f4b8e, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=457, earliestPutTs=1733884014904 2024-12-11T02:27:01,464 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 26b16e8787cf4e2e8a8b392929401c1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=481, earliestPutTs=1733884016078 2024-12-11T02:27:01,464 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a87092a574b43579d56ed0e39a5cc25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=498, earliestPutTs=1733884018231 2024-12-11T02:27:01,471 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
ccefedb36bdc39d0abb7cf1c7bd657fc#C#compaction#257 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:01,471 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/306aec0ede344df393b5142ab0e2a200 is 50, key is test_row_0/C:col10/1733884019347/Put/seqid=0 2024-12-11T02:27:01,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742134_1310 (size=13493) 2024-12-11T02:27:01,488 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ccefedb36bdc39d0abb7cf1c7bd657fc#A#compaction#256 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:01,489 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/f08c3602014e45c8bd3e2c174d407dfd is 175, key is test_row_0/A:col10/1733884019347/Put/seqid=0 2024-12-11T02:27:01,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742135_1311 (size=32447) 2024-12-11T02:27:01,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:01,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:27:01,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:27:01,517 DEBUG [Thread-794 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e3a4420 to 127.0.0.1:63149 2024-12-11T02:27:01,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:01,517 DEBUG [Thread-794 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:01,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:27:01,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:01,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:27:01,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:01,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211117b7c07706840a18ccb35b95960aee5_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884021515/Put/seqid=0 2024-12-11T02:27:01,525 DEBUG [Thread-796 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x42e904d8 to 127.0.0.1:63149 2024-12-11T02:27:01,525 DEBUG [Thread-796 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:01,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742136_1312 (size=12454) 2024-12-11T02:27:01,880 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/306aec0ede344df393b5142ab0e2a200 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/306aec0ede344df393b5142ab0e2a200 2024-12-11T02:27:01,884 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/C of ccefedb36bdc39d0abb7cf1c7bd657fc into 306aec0ede344df393b5142ab0e2a200(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:01,884 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:27:01,884 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/C, priority=13, startTime=1733884021029; duration=0sec 2024-12-11T02:27:01,884 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:01,884 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:C 2024-12-11T02:27:01,896 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/f08c3602014e45c8bd3e2c174d407dfd as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/f08c3602014e45c8bd3e2c174d407dfd 2024-12-11T02:27:01,900 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ccefedb36bdc39d0abb7cf1c7bd657fc/A of ccefedb36bdc39d0abb7cf1c7bd657fc into f08c3602014e45c8bd3e2c174d407dfd(size=31.7 K), total size for store is 31.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:01,901 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:27:01,901 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc., storeName=ccefedb36bdc39d0abb7cf1c7bd657fc/A, priority=13, startTime=1733884021029; duration=0sec 2024-12-11T02:27:01,901 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:01,901 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ccefedb36bdc39d0abb7cf1c7bd657fc:A 2024-12-11T02:27:01,928 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:01,931 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211117b7c07706840a18ccb35b95960aee5_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211117b7c07706840a18ccb35b95960aee5_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:01,932 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/070717abeea949439b004a61a2971d80, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:27:01,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/070717abeea949439b004a61a2971d80 is 175, key is test_row_0/A:col10/1733884021515/Put/seqid=0 2024-12-11T02:27:01,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742137_1313 (size=31255) 2024-12-11T02:27:02,336 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=522, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/070717abeea949439b004a61a2971d80 2024-12-11T02:27:02,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/ebf310df98ea47da9eb79cd0be776a0a is 50, key is test_row_0/B:col10/1733884021515/Put/seqid=0 2024-12-11T02:27:02,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742138_1314 (size=12301) 2024-12-11T02:27:02,572 DEBUG [Thread-800 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22e911df to 127.0.0.1:63149 2024-12-11T02:27:02,572 DEBUG [Thread-800 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:02,607 DEBUG [Thread-798 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:63149 2024-12-11T02:27:02,607 DEBUG [Thread-798 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:02,626 DEBUG [Thread-792 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:63149 2024-12-11T02:27:02,626 DEBUG [Thread-792 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 144 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 164 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 29 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5925 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5862 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T02:27:02,626 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2554 2024-12-11T02:27:02,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7662 rows 2024-12-11T02:27:02,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2567 2024-12-11T02:27:02,627 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7701 rows 2024-12-11T02:27:02,627 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T02:27:02,627 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26401a5f to 127.0.0.1:63149 2024-12-11T02:27:02,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:02,630 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T02:27:02,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T02:27:02,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:02,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T02:27:02,635 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884022635"}]},"ts":"1733884022635"} 2024-12-11T02:27:02,636 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T02:27:02,639 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T02:27:02,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T02:27:02,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, UNASSIGN}] 2024-12-11T02:27:02,641 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, UNASSIGN 2024-12-11T02:27:02,642 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=ccefedb36bdc39d0abb7cf1c7bd657fc, regionState=CLOSING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:02,643 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T02:27:02,643 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; CloseRegionProcedure ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:27:02,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T02:27:02,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/ebf310df98ea47da9eb79cd0be776a0a 2024-12-11T02:27:02,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/e642c0574aa14e76b2bfb5a4eebb5290 is 50, key is test_row_0/C:col10/1733884021515/Put/seqid=0 2024-12-11T02:27:02,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742139_1315 (size=12301) 2024-12-11T02:27:02,794 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:02,795 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:02,795 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T02:27:02,795 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): Closing ccefedb36bdc39d0abb7cf1c7bd657fc, disabling compactions & flushes 2024-12-11T02:27:02,795 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region 
TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:27:02,932 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T02:27:02,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T02:27:03,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/e642c0574aa14e76b2bfb5a4eebb5290 2024-12-11T02:27:03,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/070717abeea949439b004a61a2971d80 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/070717abeea949439b004a61a2971d80 2024-12-11T02:27:03,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/070717abeea949439b004a61a2971d80, entries=150, sequenceid=522, filesize=30.5 K 2024-12-11T02:27:03,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/ebf310df98ea47da9eb79cd0be776a0a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ebf310df98ea47da9eb79cd0be776a0a 2024-12-11T02:27:03,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ebf310df98ea47da9eb79cd0be776a0a, entries=150, sequenceid=522, filesize=12.0 K 2024-12-11T02:27:03,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/e642c0574aa14e76b2bfb5a4eebb5290 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/e642c0574aa14e76b2bfb5a4eebb5290 2024-12-11T02:27:03,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/e642c0574aa14e76b2bfb5a4eebb5290, entries=150, sequenceid=522, filesize=12.0 K 2024-12-11T02:27:03,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=26.84 KB/27480 for ccefedb36bdc39d0abb7cf1c7bd657fc in 1662ms, sequenceid=522, compaction requested=false 2024-12-11T02:27:03,178 
DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:27:03,179 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:27:03,179 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:27:03,179 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. after waiting 0 ms 2024-12-11T02:27:03,179 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:27:03,179 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(2837): Flushing ccefedb36bdc39d0abb7cf1c7bd657fc 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-11T02:27:03,179 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=A 2024-12-11T02:27:03,179 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:03,179 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=B 2024-12-11T02:27:03,179 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:03,179 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ccefedb36bdc39d0abb7cf1c7bd657fc, store=C 2024-12-11T02:27:03,179 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:03,185 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f3daddb4960947c1941a4e4bb6d62811_ccefedb36bdc39d0abb7cf1c7bd657fc is 50, key is test_row_0/A:col10/1733884022570/Put/seqid=0 2024-12-11T02:27:03,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742140_1316 (size=9914) 2024-12-11T02:27:03,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T02:27:03,590 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:03,594 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f3daddb4960947c1941a4e4bb6d62811_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211f3daddb4960947c1941a4e4bb6d62811_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:03,595 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/a154b38ea53e4925875afbf2c3f2bc36, store: [table=TestAcidGuarantees family=A region=ccefedb36bdc39d0abb7cf1c7bd657fc] 2024-12-11T02:27:03,596 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/a154b38ea53e4925875afbf2c3f2bc36 is 175, key is test_row_0/A:col10/1733884022570/Put/seqid=0 2024-12-11T02:27:03,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742141_1317 (size=22561) 2024-12-11T02:27:03,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T02:27:04,000 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=531, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/a154b38ea53e4925875afbf2c3f2bc36 2024-12-11T02:27:04,007 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/5ebff300691140b0ba0cda54f672fef8 is 50, key is test_row_0/B:col10/1733884022570/Put/seqid=0 2024-12-11T02:27:04,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742142_1318 (size=9857) 2024-12-11T02:27:04,412 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/5ebff300691140b0ba0cda54f672fef8 2024-12-11T02:27:04,419 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] hfile.HFileWriterImpl(814): Len of 
the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/76b277d6916e4f4fb25bff6a90a49b0f is 50, key is test_row_0/C:col10/1733884022570/Put/seqid=0 2024-12-11T02:27:04,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742143_1319 (size=9857) 2024-12-11T02:27:04,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T02:27:04,824 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/76b277d6916e4f4fb25bff6a90a49b0f 2024-12-11T02:27:04,829 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/A/a154b38ea53e4925875afbf2c3f2bc36 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/a154b38ea53e4925875afbf2c3f2bc36 2024-12-11T02:27:04,832 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/a154b38ea53e4925875afbf2c3f2bc36, entries=100, sequenceid=531, filesize=22.0 K 2024-12-11T02:27:04,833 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/B/5ebff300691140b0ba0cda54f672fef8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/5ebff300691140b0ba0cda54f672fef8 2024-12-11T02:27:04,836 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/5ebff300691140b0ba0cda54f672fef8, entries=100, sequenceid=531, filesize=9.6 K 2024-12-11T02:27:04,837 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/.tmp/C/76b277d6916e4f4fb25bff6a90a49b0f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/76b277d6916e4f4fb25bff6a90a49b0f 2024-12-11T02:27:04,841 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/76b277d6916e4f4fb25bff6a90a49b0f, entries=100, sequenceid=531, filesize=9.6 K 2024-12-11T02:27:04,842 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for ccefedb36bdc39d0abb7cf1c7bd657fc in 1663ms, sequenceid=531, compaction requested=true 2024-12-11T02:27:04,842 DEBUG [StoreCloser-TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/d3d9b06a77eb4cb790e30cb0c9d16a33, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0c55be63af64c34b14a5efd8198e7cb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5d43ed1375c347819ff5be16ba453c96, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/47438701b69e49b1bca852844feec8ca, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/43ccf72006d742639405cd6d382d4f97, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8480fe36c375418eb4e3b793508616d9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/44b29db4356a4de7a50da4e226b0eacb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0009e5cd59204a0485599c3c75eeb78e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5a4edd3f4924427396dcd10d6115421f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0eeaaf09e13841e290fb9e4af9676114, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e016ab70b4c64ca5ba5356e0e691399e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/cfa0ae00122e4edb980584416d9a3277, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79cc911b803545e9931dc9a52353741f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/68c1e43fac1f4888a05c926d75b9b6d0, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/2013ba94a62f442e9cb0929e805fc2bd, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/78e6d3c6b54e4bd78284feca9b782f75, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/6b8f2711584a462ba17c383db19506f1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e98cbed2c55242389b1be95229d702af, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/81bba5c6add341139a729c8fef68c879, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/7fd6d7ab78fb4cfebc3da87460e47da5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/3fb194a68eeb46bbb69bb8fb57e931ce, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9e845f85817f4092be391f815e6e531d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0a16127e247a4170baa6ed580dd58342, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/52973391ef744c00812037e401571413, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/abd9e9476bf94f3eada1349c9f8c3301, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0ed68c719874e90b093acd7b21d9216, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/80c6d6dce1d64d8f9d0748ab1c88c40f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0cc59491d3ee495bb503dd485d708994, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1d9ba8c5d6c24860aabe3c5f9a37faad, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8f1ddd8e724f49c7bf271460c096a34a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9899eaaba7e74e8abf99549b56eadc5d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1ec4221388f548eb833a0ff351e5f65a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c4a8a708495a4e1c939077087fd59f8d, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79855b0cfd0a40468835c3341e44d048] to archive 2024-12-11T02:27:04,843 DEBUG [StoreCloser-TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T02:27:04,846 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/43ccf72006d742639405cd6d382d4f97 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/43ccf72006d742639405cd6d382d4f97 2024-12-11T02:27:04,846 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/44b29db4356a4de7a50da4e226b0eacb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/44b29db4356a4de7a50da4e226b0eacb 2024-12-11T02:27:04,846 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8480fe36c375418eb4e3b793508616d9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8480fe36c375418eb4e3b793508616d9 2024-12-11T02:27:04,846 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/fe3ff39bb9a74f50a4bd9dadf2f5e0ff 2024-12-11T02:27:04,847 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/d3d9b06a77eb4cb790e30cb0c9d16a33 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/d3d9b06a77eb4cb790e30cb0c9d16a33 2024-12-11T02:27:04,847 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0c55be63af64c34b14a5efd8198e7cb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0c55be63af64c34b14a5efd8198e7cb 2024-12-11T02:27:04,847 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/47438701b69e49b1bca852844feec8ca to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/47438701b69e49b1bca852844feec8ca 2024-12-11T02:27:04,847 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5d43ed1375c347819ff5be16ba453c96 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5d43ed1375c347819ff5be16ba453c96 2024-12-11T02:27:04,849 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0009e5cd59204a0485599c3c75eeb78e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0009e5cd59204a0485599c3c75eeb78e 2024-12-11T02:27:04,849 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5a4edd3f4924427396dcd10d6115421f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/5a4edd3f4924427396dcd10d6115421f 2024-12-11T02:27:04,849 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0eeaaf09e13841e290fb9e4af9676114 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0eeaaf09e13841e290fb9e4af9676114 2024-12-11T02:27:04,849 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e016ab70b4c64ca5ba5356e0e691399e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e016ab70b4c64ca5ba5356e0e691399e 2024-12-11T02:27:04,850 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79cc911b803545e9931dc9a52353741f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79cc911b803545e9931dc9a52353741f 2024-12-11T02:27:04,851 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/cfa0ae00122e4edb980584416d9a3277 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/cfa0ae00122e4edb980584416d9a3277 2024-12-11T02:27:04,851 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/68c1e43fac1f4888a05c926d75b9b6d0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/68c1e43fac1f4888a05c926d75b9b6d0 2024-12-11T02:27:04,851 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/2013ba94a62f442e9cb0929e805fc2bd to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/2013ba94a62f442e9cb0929e805fc2bd 2024-12-11T02:27:04,853 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/78e6d3c6b54e4bd78284feca9b782f75 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/78e6d3c6b54e4bd78284feca9b782f75 2024-12-11T02:27:04,853 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/6b8f2711584a462ba17c383db19506f1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/6b8f2711584a462ba17c383db19506f1 2024-12-11T02:27:04,853 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/81bba5c6add341139a729c8fef68c879 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/81bba5c6add341139a729c8fef68c879 2024-12-11T02:27:04,853 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e98cbed2c55242389b1be95229d702af to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/e98cbed2c55242389b1be95229d702af 2024-12-11T02:27:04,854 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/7fd6d7ab78fb4cfebc3da87460e47da5 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/7fd6d7ab78fb4cfebc3da87460e47da5 2024-12-11T02:27:04,854 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9e845f85817f4092be391f815e6e531d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9e845f85817f4092be391f815e6e531d 2024-12-11T02:27:04,854 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/3fb194a68eeb46bbb69bb8fb57e931ce to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/3fb194a68eeb46bbb69bb8fb57e931ce 2024-12-11T02:27:04,855 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0ed68c719874e90b093acd7b21d9216 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c0ed68c719874e90b093acd7b21d9216 2024-12-11T02:27:04,855 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/abd9e9476bf94f3eada1349c9f8c3301 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/abd9e9476bf94f3eada1349c9f8c3301 2024-12-11T02:27:04,855 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0a16127e247a4170baa6ed580dd58342 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0a16127e247a4170baa6ed580dd58342 2024-12-11T02:27:04,855 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/52973391ef744c00812037e401571413 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/52973391ef744c00812037e401571413 2024-12-11T02:27:04,856 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0cc59491d3ee495bb503dd485d708994 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/0cc59491d3ee495bb503dd485d708994 2024-12-11T02:27:04,856 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/80c6d6dce1d64d8f9d0748ab1c88c40f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/80c6d6dce1d64d8f9d0748ab1c88c40f 2024-12-11T02:27:04,857 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1d9ba8c5d6c24860aabe3c5f9a37faad to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1d9ba8c5d6c24860aabe3c5f9a37faad 2024-12-11T02:27:04,857 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8f1ddd8e724f49c7bf271460c096a34a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/8f1ddd8e724f49c7bf271460c096a34a 2024-12-11T02:27:04,858 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9899eaaba7e74e8abf99549b56eadc5d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/9899eaaba7e74e8abf99549b56eadc5d 2024-12-11T02:27:04,858 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1ec4221388f548eb833a0ff351e5f65a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/1ec4221388f548eb833a0ff351e5f65a 2024-12-11T02:27:04,858 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c4a8a708495a4e1c939077087fd59f8d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/c4a8a708495a4e1c939077087fd59f8d 2024-12-11T02:27:04,858 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79855b0cfd0a40468835c3341e44d048 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/79855b0cfd0a40468835c3341e44d048 2024-12-11T02:27:04,859 DEBUG [StoreCloser-TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/2ebd5e36c21b4e4fb508a9ee00d4b2fb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a09b5b5a37bc466eb0debcf10c836c27, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/051d85e4b91b4a8198dc6ca0a41ed570, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/8f0c524aafe64ab786729904ca52ddaa, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/05703ce21baf47c2971431c83bc88e22, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a02596e1762d4c98bf4c490d13741a31, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/68088123d257426faf22cf5c2e69a454, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7987a03df6fa490888399f43479f9fe4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/0c03af17ebf44c0ea2c759d22cebd2a5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a95ac0358b274edba9017db90c96cf3d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ce5af055c9a54cb9a909a5d67f6539d6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c40d91eba714005a16e4c9c6ec9b19e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/050c30ede2d647189ba45c7001939022, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/b2ec6de0abca43a7a538f51e98ec3590, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/33918e85164c420f9aaa8be061133850, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/49d142360f1a4aa6835ac73f08cdf017, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/047d2effca6645e5bbed5a34ed65075a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/c846e5fb585c4a908790c73676f77512, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/4a6295b0830e4fcc8db2258b1dbb62df, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/22b0501e59d843e3a306734bb3c395b6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/58186830dfe64dba9a844d703517d50c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e60d75ebd2a2407196e14c70dcbda493, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ffc56a9dba564d7282a5ae6ad8106ded, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7eae4e76d3014ff7b5e0aa3e66ca3082, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/70d6979f6a2546e0a79d849939b7256b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/610965d8a750475a8bea09efe85fd4f4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a38101bc944746dc8608c57d9d210edb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/3d1874e3deea4c68a22fce74cbae9d89, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/eee2f4677637463da8b46af5c588a5c6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f0876d7e0dec45468a912efb2f3df730, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/915349b008344a72ab19c8e4f9aa1a1e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/d120e810bb57470182ebc3f403c0564c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f49a363af7634caea0d31c5a5eb38c0a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c049c2724bd49ce96d5738c74f2163f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e9e7889f9e49456caf8ecaed2f4a0b92] to archive 2024-12-11T02:27:04,860 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T02:27:04,863 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/2ebd5e36c21b4e4fb508a9ee00d4b2fb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/2ebd5e36c21b4e4fb508a9ee00d4b2fb 2024-12-11T02:27:04,863 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a09b5b5a37bc466eb0debcf10c836c27 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a09b5b5a37bc466eb0debcf10c836c27 2024-12-11T02:27:04,863 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/8f0c524aafe64ab786729904ca52ddaa to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/8f0c524aafe64ab786729904ca52ddaa 2024-12-11T02:27:04,863 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a02596e1762d4c98bf4c490d13741a31 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a02596e1762d4c98bf4c490d13741a31 2024-12-11T02:27:04,864 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/051d85e4b91b4a8198dc6ca0a41ed570 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/051d85e4b91b4a8198dc6ca0a41ed570 2024-12-11T02:27:04,864 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/68088123d257426faf22cf5c2e69a454 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/68088123d257426faf22cf5c2e69a454 2024-12-11T02:27:04,864 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7987a03df6fa490888399f43479f9fe4 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7987a03df6fa490888399f43479f9fe4 2024-12-11T02:27:04,865 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/05703ce21baf47c2971431c83bc88e22 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/05703ce21baf47c2971431c83bc88e22 2024-12-11T02:27:04,867 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a95ac0358b274edba9017db90c96cf3d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a95ac0358b274edba9017db90c96cf3d 2024-12-11T02:27:04,867 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/050c30ede2d647189ba45c7001939022 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/050c30ede2d647189ba45c7001939022 2024-12-11T02:27:04,867 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c40d91eba714005a16e4c9c6ec9b19e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c40d91eba714005a16e4c9c6ec9b19e 2024-12-11T02:27:04,867 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ce5af055c9a54cb9a909a5d67f6539d6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ce5af055c9a54cb9a909a5d67f6539d6 2024-12-11T02:27:04,867 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/b2ec6de0abca43a7a538f51e98ec3590 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/b2ec6de0abca43a7a538f51e98ec3590 2024-12-11T02:27:04,868 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/0c03af17ebf44c0ea2c759d22cebd2a5 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/0c03af17ebf44c0ea2c759d22cebd2a5 2024-12-11T02:27:04,869 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/33918e85164c420f9aaa8be061133850 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/33918e85164c420f9aaa8be061133850 2024-12-11T02:27:04,870 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/49d142360f1a4aa6835ac73f08cdf017 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/49d142360f1a4aa6835ac73f08cdf017 2024-12-11T02:27:04,870 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/047d2effca6645e5bbed5a34ed65075a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/047d2effca6645e5bbed5a34ed65075a 2024-12-11T02:27:04,870 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/4a6295b0830e4fcc8db2258b1dbb62df to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/4a6295b0830e4fcc8db2258b1dbb62df 2024-12-11T02:27:04,871 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/c846e5fb585c4a908790c73676f77512 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/c846e5fb585c4a908790c73676f77512 2024-12-11T02:27:04,871 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/22b0501e59d843e3a306734bb3c395b6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/22b0501e59d843e3a306734bb3c395b6 2024-12-11T02:27:04,871 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/58186830dfe64dba9a844d703517d50c to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/58186830dfe64dba9a844d703517d50c 2024-12-11T02:27:04,872 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e60d75ebd2a2407196e14c70dcbda493 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e60d75ebd2a2407196e14c70dcbda493 2024-12-11T02:27:04,872 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ffc56a9dba564d7282a5ae6ad8106ded to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ffc56a9dba564d7282a5ae6ad8106ded 2024-12-11T02:27:04,873 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7eae4e76d3014ff7b5e0aa3e66ca3082 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/7eae4e76d3014ff7b5e0aa3e66ca3082 2024-12-11T02:27:04,873 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/610965d8a750475a8bea09efe85fd4f4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/610965d8a750475a8bea09efe85fd4f4 2024-12-11T02:27:04,873 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/70d6979f6a2546e0a79d849939b7256b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/70d6979f6a2546e0a79d849939b7256b 2024-12-11T02:27:04,874 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a38101bc944746dc8608c57d9d210edb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/a38101bc944746dc8608c57d9d210edb 2024-12-11T02:27:04,874 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/eee2f4677637463da8b46af5c588a5c6 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/eee2f4677637463da8b46af5c588a5c6 2024-12-11T02:27:04,874 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/3d1874e3deea4c68a22fce74cbae9d89 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/3d1874e3deea4c68a22fce74cbae9d89 2024-12-11T02:27:04,875 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f0876d7e0dec45468a912efb2f3df730 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f0876d7e0dec45468a912efb2f3df730 2024-12-11T02:27:04,876 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/915349b008344a72ab19c8e4f9aa1a1e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/915349b008344a72ab19c8e4f9aa1a1e 2024-12-11T02:27:04,876 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c049c2724bd49ce96d5738c74f2163f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/9c049c2724bd49ce96d5738c74f2163f 2024-12-11T02:27:04,876 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/d120e810bb57470182ebc3f403c0564c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/d120e810bb57470182ebc3f403c0564c 2024-12-11T02:27:04,876 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f49a363af7634caea0d31c5a5eb38c0a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/f49a363af7634caea0d31c5a5eb38c0a 2024-12-11T02:27:04,876 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e9e7889f9e49456caf8ecaed2f4a0b92 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/e9e7889f9e49456caf8ecaed2f4a0b92 2024-12-11T02:27:04,878 DEBUG [StoreCloser-TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/36621148923f46e3b4925181948817e1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/55658bf27cbe40d4803a8c481cacae48, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/722dc311b1f348668e870c3089651dee, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b7947152e1e643709afdcfc2119786ab, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/0c89c1b0ff484294b6d8f478ae8c888a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/af641234e9c447eda49a85763dd8d8d3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/370ae0ab0a2140ad883c1690b5bb70ea, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7f6a18c2eb974f069a9092960c8a1b05, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/24a0c4d1405f4659b687e29cb7f98d9b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3677b0f42c774bdb9e1c1f58454598ca, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7b1b933eda6a402f89624fd60e79b734, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/96242fcef45a4e9b8396b78ce2e9f1f2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/a83d54b765ea43a382048d867ba72e55, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/d2bdf91277cd4119bba6dd1ff70bb44f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b93015982e224f439cd6c5f6780c1ab8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6da11f09bac742f686756aa15d81e357, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/83d16a36616d46ad81c894fb9af8b839, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/abaa129b0a08426a8c43861ec3d0e2ef, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd8ed48b62c2426882a8d82a8a2b89ed, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/aa51a250e34c4a59ba1fe0b0e57f3162, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/813df9066faf41cdaf385cb3d8380b1d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/1a8d07ee2b1a425cb10f3dcb6b0a6291, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd7d1c10b0de4fc29879bf095aa875a1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/2c307343d4174a9697a319cb3ac62b87, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/c62e166af9ec4e4c998142d44f763a5c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/e48615a86a854ea59e2080e05aeab5c2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3effb332fb434402ba81c7928b849db9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/428152d460854b0798afc5f3bd5b8d85, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/95f048489012453f8412d7b20437201a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/336d70266a8243a6bb94365d80f05441, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/54dc2e05747e45cda381b4ffd2ac6788, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/10992c4bf4c7483abb6a28ccc51f4b8e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/92d9ee144338465991babdb59dce0252, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/26b16e8787cf4e2e8a8b392929401c1b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6a87092a574b43579d56ed0e39a5cc25] to archive 2024-12-11T02:27:04,879 DEBUG [StoreCloser-TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-11T02:27:04,882 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/55658bf27cbe40d4803a8c481cacae48 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/55658bf27cbe40d4803a8c481cacae48 2024-12-11T02:27:04,882 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/36621148923f46e3b4925181948817e1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/36621148923f46e3b4925181948817e1 2024-12-11T02:27:04,882 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b7947152e1e643709afdcfc2119786ab to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b7947152e1e643709afdcfc2119786ab 2024-12-11T02:27:04,882 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/370ae0ab0a2140ad883c1690b5bb70ea to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/370ae0ab0a2140ad883c1690b5bb70ea 2024-12-11T02:27:04,882 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/722dc311b1f348668e870c3089651dee to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/722dc311b1f348668e870c3089651dee 2024-12-11T02:27:04,883 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/0c89c1b0ff484294b6d8f478ae8c888a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/0c89c1b0ff484294b6d8f478ae8c888a 2024-12-11T02:27:04,884 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7f6a18c2eb974f069a9092960c8a1b05 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7f6a18c2eb974f069a9092960c8a1b05 2024-12-11T02:27:04,886 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/af641234e9c447eda49a85763dd8d8d3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/af641234e9c447eda49a85763dd8d8d3 2024-12-11T02:27:04,886 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/24a0c4d1405f4659b687e29cb7f98d9b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/24a0c4d1405f4659b687e29cb7f98d9b 2024-12-11T02:27:04,886 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3677b0f42c774bdb9e1c1f58454598ca to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3677b0f42c774bdb9e1c1f58454598ca 2024-12-11T02:27:04,886 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7b1b933eda6a402f89624fd60e79b734 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/7b1b933eda6a402f89624fd60e79b734 2024-12-11T02:27:04,886 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/d2bdf91277cd4119bba6dd1ff70bb44f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/d2bdf91277cd4119bba6dd1ff70bb44f 2024-12-11T02:27:04,886 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/96242fcef45a4e9b8396b78ce2e9f1f2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/96242fcef45a4e9b8396b78ce2e9f1f2 2024-12-11T02:27:04,887 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b93015982e224f439cd6c5f6780c1ab8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/b93015982e224f439cd6c5f6780c1ab8 2024-12-11T02:27:04,887 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/a83d54b765ea43a382048d867ba72e55 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/a83d54b765ea43a382048d867ba72e55 2024-12-11T02:27:04,888 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/abaa129b0a08426a8c43861ec3d0e2ef to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/abaa129b0a08426a8c43861ec3d0e2ef 2024-12-11T02:27:04,888 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/83d16a36616d46ad81c894fb9af8b839 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/83d16a36616d46ad81c894fb9af8b839 2024-12-11T02:27:04,888 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6da11f09bac742f686756aa15d81e357 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6da11f09bac742f686756aa15d81e357 2024-12-11T02:27:04,888 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/aa51a250e34c4a59ba1fe0b0e57f3162 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/aa51a250e34c4a59ba1fe0b0e57f3162 2024-12-11T02:27:04,889 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/813df9066faf41cdaf385cb3d8380b1d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/813df9066faf41cdaf385cb3d8380b1d 2024-12-11T02:27:04,889 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd8ed48b62c2426882a8d82a8a2b89ed to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd8ed48b62c2426882a8d82a8a2b89ed 2024-12-11T02:27:04,890 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/1a8d07ee2b1a425cb10f3dcb6b0a6291 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/1a8d07ee2b1a425cb10f3dcb6b0a6291 2024-12-11T02:27:04,890 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd7d1c10b0de4fc29879bf095aa875a1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/cd7d1c10b0de4fc29879bf095aa875a1 2024-12-11T02:27:04,891 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/2c307343d4174a9697a319cb3ac62b87 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/2c307343d4174a9697a319cb3ac62b87 2024-12-11T02:27:04,891 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/c62e166af9ec4e4c998142d44f763a5c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/c62e166af9ec4e4c998142d44f763a5c 2024-12-11T02:27:04,891 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/e48615a86a854ea59e2080e05aeab5c2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/e48615a86a854ea59e2080e05aeab5c2 2024-12-11T02:27:04,891 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/95f048489012453f8412d7b20437201a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/95f048489012453f8412d7b20437201a 2024-12-11T02:27:04,891 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/428152d460854b0798afc5f3bd5b8d85 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/428152d460854b0798afc5f3bd5b8d85 2024-12-11T02:27:04,892 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3effb332fb434402ba81c7928b849db9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/3effb332fb434402ba81c7928b849db9 2024-12-11T02:27:04,893 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/336d70266a8243a6bb94365d80f05441 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/336d70266a8243a6bb94365d80f05441 2024-12-11T02:27:04,893 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/54dc2e05747e45cda381b4ffd2ac6788 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/54dc2e05747e45cda381b4ffd2ac6788 2024-12-11T02:27:04,893 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/10992c4bf4c7483abb6a28ccc51f4b8e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/10992c4bf4c7483abb6a28ccc51f4b8e 2024-12-11T02:27:04,893 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/92d9ee144338465991babdb59dce0252 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/92d9ee144338465991babdb59dce0252 2024-12-11T02:27:04,894 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6a87092a574b43579d56ed0e39a5cc25 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/6a87092a574b43579d56ed0e39a5cc25 2024-12-11T02:27:04,894 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/26b16e8787cf4e2e8a8b392929401c1b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/26b16e8787cf4e2e8a8b392929401c1b 2024-12-11T02:27:04,898 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/recovered.edits/534.seqid, newMaxSeqId=534, maxSeqId=4 2024-12-11T02:27:04,898 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc. 2024-12-11T02:27:04,898 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for ccefedb36bdc39d0abb7cf1c7bd657fc: 2024-12-11T02:27:04,900 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:04,900 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=ccefedb36bdc39d0abb7cf1c7bd657fc, regionState=CLOSED 2024-12-11T02:27:04,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-11T02:27:04,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseRegionProcedure ccefedb36bdc39d0abb7cf1c7bd657fc, server=5f57a24c5131,40311,1733883964600 in 2.2580 sec 2024-12-11T02:27:04,903 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=59, resume processing ppid=58 2024-12-11T02:27:04,903 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=58, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ccefedb36bdc39d0abb7cf1c7bd657fc, UNASSIGN in 2.2620 sec 2024-12-11T02:27:04,905 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-11T02:27:04,905 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.2650 sec 2024-12-11T02:27:04,906 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884024906"}]},"ts":"1733884024906"} 2024-12-11T02:27:04,906 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T02:27:04,910 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T02:27:04,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.2790 sec 2024-12-11T02:27:06,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T02:27:06,739 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-11T02:27:06,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T02:27:06,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:06,741 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=61, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:06,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-11T02:27:06,742 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=61, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:06,743 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,745 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/recovered.edits] 2024-12-11T02:27:06,749 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/f08c3602014e45c8bd3e2c174d407dfd to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/f08c3602014e45c8bd3e2c174d407dfd 2024-12-11T02:27:06,749 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/070717abeea949439b004a61a2971d80 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/070717abeea949439b004a61a2971d80 2024-12-11T02:27:06,749 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/a154b38ea53e4925875afbf2c3f2bc36 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/A/a154b38ea53e4925875afbf2c3f2bc36 2024-12-11T02:27:06,752 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/5ebff300691140b0ba0cda54f672fef8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/5ebff300691140b0ba0cda54f672fef8 2024-12-11T02:27:06,752 DEBUG [HFileArchiver-7 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/22c34cdd21d04bde8dc92edc65df309e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/22c34cdd21d04bde8dc92edc65df309e 2024-12-11T02:27:06,752 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ebf310df98ea47da9eb79cd0be776a0a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/B/ebf310df98ea47da9eb79cd0be776a0a 2024-12-11T02:27:06,755 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/e642c0574aa14e76b2bfb5a4eebb5290 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/e642c0574aa14e76b2bfb5a4eebb5290 2024-12-11T02:27:06,755 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/76b277d6916e4f4fb25bff6a90a49b0f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/76b277d6916e4f4fb25bff6a90a49b0f 2024-12-11T02:27:06,755 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/306aec0ede344df393b5142ab0e2a200 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/C/306aec0ede344df393b5142ab0e2a200 2024-12-11T02:27:06,758 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/recovered.edits/534.seqid to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc/recovered.edits/534.seqid 2024-12-11T02:27:06,758 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,758 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T02:27:06,759 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T02:27:06,759 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-11T02:27:06,769 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111134c06f4c244c6db22fcd8da1553f0e_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111134c06f4c244c6db22fcd8da1553f0e_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,770 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111839bc0019e64a53be45d1c334eed219_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111839bc0019e64a53be45d1c334eed219_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,770 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211117b7c07706840a18ccb35b95960aee5_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211117b7c07706840a18ccb35b95960aee5_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,770 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121135836ee0eaf6457b9fde3240946d48a0_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121135836ee0eaf6457b9fde3240946d48a0_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,770 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412112854bd6aec2f4e7bb84d223a28ac7e35_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412112854bd6aec2f4e7bb84d223a28ac7e35_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,770 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412113b577148094b4c73b140eeb0b731b527_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412113b577148094b4c73b140eeb0b731b527_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,770 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121129fb7ebcadfe4964b1f2969b707fbedc_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121129fb7ebcadfe4964b1f2969b707fbedc_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,771 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121140372384100f433f880e44160f61f509_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121140372384100f433f880e44160f61f509_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,772 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211518b379c63864a57aa917c90ee45f057_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211518b379c63864a57aa917c90ee45f057_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,772 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114cb2f881f14c42ca9926d8fc639a115d_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114cb2f881f14c42ca9926d8fc639a115d_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,772 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116bc9a6b59a424273ab61b7d4bfc2fa2b_ccefedb36bdc39d0abb7cf1c7bd657fc to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116bc9a6b59a424273ab61b7d4bfc2fa2b_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,772 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211514000ccb2e54b6a8b9eea7a76f71f53_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211514000ccb2e54b6a8b9eea7a76f71f53_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,772 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121158326472aae5426e8ef4d2a65afe7d50_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121158326472aae5426e8ef4d2a65afe7d50_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,772 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117139356ed0c046ab82bf2c6db025682b_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117139356ed0c046ab82bf2c6db025682b_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,773 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115998667be231465196dd82da950299dc_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115998667be231465196dd82da950299dc_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,773 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121173db793a5aa341348c0523fccf96173c_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121173db793a5aa341348c0523fccf96173c_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,774 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121176291f0cc14245bdbbfddd6779be4cd5_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121176291f0cc14245bdbbfddd6779be4cd5_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,775 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211ae7653b2d05c4ce6b045577e1086f380_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211ae7653b2d05c4ce6b045577e1086f380_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,775 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121190544224e3124147b7361d11337d281e_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121190544224e3124147b7361d11337d281e_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,775 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119a8d17836b4140b48e867d17ab7719dc_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119a8d17836b4140b48e867d17ab7719dc_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,775 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121177887b81b3bc496a8f3306ad806d1237_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121177887b81b3bc496a8f3306ad806d1237_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,775 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211b85c8a1c6d304441a92cd3afdcfab41f_ccefedb36bdc39d0abb7cf1c7bd657fc to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211b85c8a1c6d304441a92cd3afdcfab41f_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,775 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bc7d085477e243cbae0fc9d03d4830fa_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bc7d085477e243cbae0fc9d03d4830fa_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,776 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e1f9b95b9b3e4a6b8991d4c65ae3abe4_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e1f9b95b9b3e4a6b8991d4c65ae3abe4_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,776 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211f710dfd99cd14be3897be069de6b6af5_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211f710dfd99cd14be3897be069de6b6af5_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,776 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e4eb85e587964737ae3fb11bd3b215d9_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e4eb85e587964737ae3fb11bd3b215d9_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,776 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211f3daddb4960947c1941a4e4bb6d62811_ccefedb36bdc39d0abb7cf1c7bd657fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211f3daddb4960947c1941a4e4bb6d62811_ccefedb36bdc39d0abb7cf1c7bd657fc 2024-12-11T02:27:06,777 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T02:27:06,779 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=61, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:06,781 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T02:27:06,784 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T02:27:06,785 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=61, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:06,785 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-11T02:27:06,785 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733884026785"}]},"ts":"9223372036854775807"} 2024-12-11T02:27:06,787 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T02:27:06,787 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ccefedb36bdc39d0abb7cf1c7bd657fc, NAME => 'TestAcidGuarantees,,1733883996780.ccefedb36bdc39d0abb7cf1c7bd657fc.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T02:27:06,787 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
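Editor's note: the entries above record the client-driven DISABLE (procId 57) and DELETE (procId 61) of default:TestAcidGuarantees running as master procedures, including archiving of the region and MOB store files. As a minimal client-side sketch of the calls that trigger those procedures, assuming only a standard hbase-site.xml on the classpath and using the stock Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropAcidTable {
    public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml on the classpath pointing at the cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(tn)) {
                // A table must be disabled before deletion; each call blocks until
                // the corresponding master procedure (DisableTableProcedure,
                // DeleteTableProcedure) reports SUCCESS, as seen in the log above.
                admin.disableTable(tn);
                admin.deleteTable(tn);
            }
        }
    }
}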
2024-12-11T02:27:06,787 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733884026787"}]},"ts":"9223372036854775807"} 2024-12-11T02:27:06,789 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T02:27:06,795 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=61, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:06,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 55 msec 2024-12-11T02:27:06,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-11T02:27:06,843 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-11T02:27:06,853 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=248 (was 245) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1663596587_22 at /127.0.0.1:47838 [Waiting for operation #741] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: hconnection-0x2d5916a5-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2d5916a5-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2d5916a5-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1663596587_22 at /127.0.0.1:47658 [Waiting for operation #799] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2d5916a5-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_440387272_22 at /127.0.0.1:47576 [Waiting for operation #821] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_440387272_22 at /127.0.0.1:36206 [Waiting for operation #532] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=465 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=384 (was 268) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4349 (was 4528) 2024-12-11T02:27:06,861 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=248, OpenFileDescriptor=465, MaxFileDescriptor=1048576, SystemLoadAverage=384, ProcessCount=11, AvailableMemoryMB=4349 2024-12-11T02:27:06,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-11T02:27:06,863 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:27:06,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:06,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=62, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T02:27:06,865 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:06,865 
INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 62 2024-12-11T02:27:06,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-11T02:27:06,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=62, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T02:27:06,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742144_1320 (size=963) 2024-12-11T02:27:06,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-11T02:27:07,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-11T02:27:07,273 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 2024-12-11T02:27:07,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742145_1321 (size=53) 2024-12-11T02:27:07,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-11T02:27:07,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:27:07,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 66c347f1441760076f62fd1847fd01aa, disabling compactions & flushes 2024-12-11T02:27:07,679 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:07,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:07,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. after waiting 0 ms 2024-12-11T02:27:07,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:07,679 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:07,679 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:07,680 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=62, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T02:27:07,680 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733884027680"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733884027680"}]},"ts":"1733884027680"} 2024-12-11T02:27:07,681 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
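Editor's note: the CreateTableProcedure above (pid=62) was started from the table definition printed at 02:27:06,863: an ADAPTIVE compacting memstore at the table level plus three column families A, B and C with VERSIONS => 1. A minimal sketch of building the equivalent descriptor through the Java client API, assuming an Admin instance obtained as in the earlier sketch:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTable {
    static void createTestTable(Admin admin) throws Exception {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata from the logged definition: use the ADAPTIVE
            // in-memory compaction policy for the compacting memstore.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
            // VERSIONS => '1' in the logged definition; other attributes are
            // left at their defaults.
            builder.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)
                    .build());
        }
        // Blocks until the CreateTableProcedure (pid=62 above) completes.
        admin.createTable(builder.build());
    }
}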
2024-12-11T02:27:07,682 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=62, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T02:27:07,682 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884027682"}]},"ts":"1733884027682"} 2024-12-11T02:27:07,683 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T02:27:07,686 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=66c347f1441760076f62fd1847fd01aa, ASSIGN}] 2024-12-11T02:27:07,687 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=66c347f1441760076f62fd1847fd01aa, ASSIGN 2024-12-11T02:27:07,688 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=66c347f1441760076f62fd1847fd01aa, ASSIGN; state=OFFLINE, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=false 2024-12-11T02:27:07,838 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=66c347f1441760076f62fd1847fd01aa, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:07,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; OpenRegionProcedure 66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:27:07,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-11T02:27:07,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:07,994 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:07,994 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(7285): Opening region: {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:27:07,995 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:07,995 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:27:07,995 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(7327): checking encryption for 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:07,995 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(7330): checking classloading for 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:07,996 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:07,998 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:27:07,998 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 66c347f1441760076f62fd1847fd01aa columnFamilyName A 2024-12-11T02:27:07,998 DEBUG [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:07,998 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] regionserver.HStore(327): Store=66c347f1441760076f62fd1847fd01aa/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:27:07,999 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:07,999 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:27:08,000 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 66c347f1441760076f62fd1847fd01aa columnFamilyName B 2024-12-11T02:27:08,000 DEBUG [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:08,000 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] regionserver.HStore(327): Store=66c347f1441760076f62fd1847fd01aa/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:27:08,000 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:08,001 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:27:08,001 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 66c347f1441760076f62fd1847fd01aa columnFamilyName C 2024-12-11T02:27:08,001 DEBUG [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:08,002 INFO [StoreOpener-66c347f1441760076f62fd1847fd01aa-1 {}] regionserver.HStore(327): Store=66c347f1441760076f62fd1847fd01aa/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:27:08,002 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:08,002 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:08,003 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:08,004 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:27:08,005 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(1085): writing seq id for 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:08,007 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T02:27:08,007 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(1102): Opened 66c347f1441760076f62fd1847fd01aa; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67040147, jitterRate=-0.0010239630937576294}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:27:08,008 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegion(1001): Region open journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:08,009 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., pid=64, masterSystemTime=1733884027991 2024-12-11T02:27:08,010 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:08,010 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=64}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
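Editor's note: with the TestAcidGuarantees region opened on the region server, testGetAtomicity exercises single-row atomicity across the three families. The sketch below only illustrates the kind of multi-family Put/Get pattern involved; the row key, qualifier and value are made up for illustration and are not taken from the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicRowWrite {
    static Result writeAndReadRow(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            byte[] row = Bytes.toBytes("example_row");   // illustrative row key
            byte[] qual = Bytes.toBytes("col0");         // illustrative qualifier
            byte[] value = Bytes.toBytes(12345L);        // illustrative value
            Put put = new Put(row);
            // A single Put spanning families A, B and C is applied to the row
            // atomically; concurrent readers should never observe the families
            // disagreeing, which is the property the test asserts.
            put.addColumn(Bytes.toBytes("A"), qual, value);
            put.addColumn(Bytes.toBytes("B"), qual, value);
            put.addColumn(Bytes.toBytes("C"), qual, value);
            table.put(put);
            return table.get(new Get(row));
        }
    }
}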
2024-12-11T02:27:08,011 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=66c347f1441760076f62fd1847fd01aa, regionState=OPEN, openSeqNum=2, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:08,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-11T02:27:08,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; OpenRegionProcedure 66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 in 172 msec 2024-12-11T02:27:08,014 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=63, resume processing ppid=62 2024-12-11T02:27:08,014 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, ppid=62, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=66c347f1441760076f62fd1847fd01aa, ASSIGN in 327 msec 2024-12-11T02:27:08,015 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=62, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T02:27:08,015 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884028015"}]},"ts":"1733884028015"} 2024-12-11T02:27:08,016 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T02:27:08,018 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=62, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T02:27:08,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1550 sec 2024-12-11T02:27:08,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-11T02:27:08,969 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 62 completed 2024-12-11T02:27:08,971 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6862e3ce to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28e73c0 2024-12-11T02:27:08,974 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ee0130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:08,976 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:08,977 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:08,978 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T02:27:08,979 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40620, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T02:27:08,981 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d296fed to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c480dfb 2024-12-11T02:27:08,984 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683b64c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:08,985 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-12-11T02:27:08,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:08,989 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43f04e0e to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e9ae050 2024-12-11T02:27:08,991 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a703d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:08,992 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560ec309 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef31f8 2024-12-11T02:27:08,995 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ed1e44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:08,996 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5886c0f2 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@eb04aeb 2024-12-11T02:27:08,999 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72537a47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:09,000 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-12-11T02:27:09,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:09,004 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-12-11T02:27:09,007 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:09,007 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e6bf6a to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@605827c9 2024-12-11T02:27:09,010 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1403c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:09,011 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1730a60f to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3677bd4f 2024-12-11T02:27:09,013 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf0ba59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:09,014 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x598cfed4 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@521aad6f 2024-12-11T02:27:09,017 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c86f707, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:09,021 DEBUG [hconnection-0xbe116ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:09,021 DEBUG [hconnection-0x1e158618-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:09,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:09,022 DEBUG [hconnection-0xdb6f6a7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:09,022 DEBUG [hconnection-0x4fc463db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:09,023 DEBUG [hconnection-0x573dc4a7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication 
for service=ClientService, sasl=false 2024-12-11T02:27:09,023 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,023 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,023 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46242, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,023 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,024 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46282, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-11T02:27:09,025 DEBUG [hconnection-0x62aa1b0a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:09,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T02:27:09,025 DEBUG [hconnection-0x67bf82a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:09,026 DEBUG [hconnection-0x233737a5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:09,026 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:09,026 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46304, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,026 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,027 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,027 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:09,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:09,028 DEBUG [hconnection-0x9a747c4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, 
sasl=false 2024-12-11T02:27:09,030 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:09,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:27:09,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:09,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:09,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:09,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:09,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:09,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:09,048 DEBUG [hconnection-0x6a559206-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:09,050 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:09,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884089056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884089056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884089057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884089063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/6f8e5b70e3cc4c05999f7c0ed9bd9fa3 is 50, key is test_row_0/A:col10/1733884029031/Put/seqid=0 2024-12-11T02:27:09,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884089067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742146_1322 (size=12001) 2024-12-11T02:27:09,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/6f8e5b70e3cc4c05999f7c0ed9bd9fa3 2024-12-11T02:27:09,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T02:27:09,147 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e79c9b36940743f2878af881205a15c2 is 50, key is test_row_0/B:col10/1733884029031/Put/seqid=0 2024-12-11T02:27:09,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884089165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884089165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884089168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884089169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884089172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,180 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-11T02:27:09,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:09,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:09,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:09,181 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:09,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:09,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:09,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742147_1323 (size=12001) 2024-12-11T02:27:09,188 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e79c9b36940743f2878af881205a15c2 2024-12-11T02:27:09,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/365a52b164ff4ddd8ee1585df763c161 is 50, key is test_row_0/C:col10/1733884029031/Put/seqid=0 2024-12-11T02:27:09,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742148_1324 (size=12001) 2024-12-11T02:27:09,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/365a52b164ff4ddd8ee1585df763c161 2024-12-11T02:27:09,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/6f8e5b70e3cc4c05999f7c0ed9bd9fa3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/6f8e5b70e3cc4c05999f7c0ed9bd9fa3 2024-12-11T02:27:09,288 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/6f8e5b70e3cc4c05999f7c0ed9bd9fa3, entries=150, sequenceid=13, filesize=11.7 K 2024-12-11T02:27:09,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e79c9b36940743f2878af881205a15c2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e79c9b36940743f2878af881205a15c2 2024-12-11T02:27:09,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e79c9b36940743f2878af881205a15c2, entries=150, sequenceid=13, filesize=11.7 K 2024-12-11T02:27:09,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/365a52b164ff4ddd8ee1585df763c161 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/365a52b164ff4ddd8ee1585df763c161 2024-12-11T02:27:09,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/365a52b164ff4ddd8ee1585df763c161, entries=150, sequenceid=13, filesize=11.7 K 2024-12-11T02:27:09,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 66c347f1441760076f62fd1847fd01aa in 270ms, sequenceid=13, compaction requested=false 2024-12-11T02:27:09,306 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-11T02:27:09,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:09,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T02:27:09,335 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-11T02:27:09,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:09,335 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:27:09,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:09,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:09,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:09,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:09,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:09,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:09,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/7d99f9a48904442d885d1a21fd26e0fb is 50, key is test_row_0/A:col10/1733884029048/Put/seqid=0 2024-12-11T02:27:09,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:09,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:09,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884089398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884089398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884089402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884089402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884089402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742149_1325 (size=12001) 2024-12-11T02:27:09,425 DEBUG [master/5f57a24c5131:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 5519ba8b50773a902ba9dca0bed2059c changed from -1.0 to 0.0, refreshing cache 2024-12-11T02:27:09,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884089504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884089504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884089506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884089507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884089507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T02:27:09,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884089706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884089706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884089710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884089710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:09,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884089711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:09,824 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/7d99f9a48904442d885d1a21fd26e0fb 2024-12-11T02:27:09,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e96a4342a34544699afc39aedb1ff61b is 50, key is test_row_0/B:col10/1733884029048/Put/seqid=0 2024-12-11T02:27:09,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742150_1326 (size=12001) 2024-12-11T02:27:09,862 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e96a4342a34544699afc39aedb1ff61b 2024-12-11T02:27:09,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/d94bc57e10a7419aa2d8f10e8256fe65 is 50, key is 
test_row_0/C:col10/1733884029048/Put/seqid=0 2024-12-11T02:27:09,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742151_1327 (size=12001) 2024-12-11T02:27:09,885 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/d94bc57e10a7419aa2d8f10e8256fe65 2024-12-11T02:27:09,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/7d99f9a48904442d885d1a21fd26e0fb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7d99f9a48904442d885d1a21fd26e0fb 2024-12-11T02:27:09,898 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7d99f9a48904442d885d1a21fd26e0fb, entries=150, sequenceid=37, filesize=11.7 K 2024-12-11T02:27:09,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e96a4342a34544699afc39aedb1ff61b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e96a4342a34544699afc39aedb1ff61b 2024-12-11T02:27:09,905 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e96a4342a34544699afc39aedb1ff61b, entries=150, sequenceid=37, filesize=11.7 K 2024-12-11T02:27:09,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/d94bc57e10a7419aa2d8f10e8256fe65 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d94bc57e10a7419aa2d8f10e8256fe65 2024-12-11T02:27:09,911 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d94bc57e10a7419aa2d8f10e8256fe65, entries=150, sequenceid=37, filesize=11.7 K 2024-12-11T02:27:09,912 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 
{event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 66c347f1441760076f62fd1847fd01aa in 577ms, sequenceid=37, compaction requested=false 2024-12-11T02:27:09,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:09,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:09,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-11T02:27:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-11T02:27:09,915 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-11T02:27:09,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 887 msec 2024-12-11T02:27:09,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 895 msec 2024-12-11T02:27:10,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:10,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T02:27:10,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:10,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:10,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:10,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:10,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:10,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:10,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/305f3345834d468c864d935f0c309bf7 is 50, key is test_row_0/A:col10/1733884030008/Put/seqid=0 2024-12-11T02:27:10,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742152_1328 (size=12001) 2024-12-11T02:27:10,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/305f3345834d468c864d935f0c309bf7 2024-12-11T02:27:10,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884090031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884090031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884090033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884090034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884090034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/c2b9dad8bf2544f08715a0d7e428ab0f is 50, key is test_row_0/B:col10/1733884030008/Put/seqid=0 2024-12-11T02:27:10,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742153_1329 (size=12001) 2024-12-11T02:27:10,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T02:27:10,129 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-11T02:27:10,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:10,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-11T02:27:10,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T02:27:10,133 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:10,134 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:10,134 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:10,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884090135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884090136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884090137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884090138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884090139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T02:27:10,286 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-11T02:27:10,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:10,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:10,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:10,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:10,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:10,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:10,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884090338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884090339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884090341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884090341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884090342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,432 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T02:27:10,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T02:27:10,439 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-11T02:27:10,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:10,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:10,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:10,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:10,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:10,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/c2b9dad8bf2544f08715a0d7e428ab0f 2024-12-11T02:27:10,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/a39ccfeaeb0341c0ae65e00ecfc96e3e is 50, key is test_row_0/C:col10/1733884030008/Put/seqid=0 2024-12-11T02:27:10,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742154_1330 (size=12001) 2024-12-11T02:27:10,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/a39ccfeaeb0341c0ae65e00ecfc96e3e 2024-12-11T02:27:10,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/305f3345834d468c864d935f0c309bf7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/305f3345834d468c864d935f0c309bf7 2024-12-11T02:27:10,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/305f3345834d468c864d935f0c309bf7, entries=150, sequenceid=50, filesize=11.7 K 2024-12-11T02:27:10,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/c2b9dad8bf2544f08715a0d7e428ab0f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c2b9dad8bf2544f08715a0d7e428ab0f 2024-12-11T02:27:10,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c2b9dad8bf2544f08715a0d7e428ab0f, entries=150, sequenceid=50, filesize=11.7 K 2024-12-11T02:27:10,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/a39ccfeaeb0341c0ae65e00ecfc96e3e as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a39ccfeaeb0341c0ae65e00ecfc96e3e 2024-12-11T02:27:10,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a39ccfeaeb0341c0ae65e00ecfc96e3e, entries=150, sequenceid=50, filesize=11.7 K 2024-12-11T02:27:10,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 66c347f1441760076f62fd1847fd01aa in 493ms, sequenceid=50, compaction requested=true 2024-12-11T02:27:10,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:10,503 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:10,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:10,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:10,503 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:10,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:10,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:10,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:10,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:10,504 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:10,504 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/A is initiating minor compaction (all files) 2024-12-11T02:27:10,505 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/A in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:10,505 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/6f8e5b70e3cc4c05999f7c0ed9bd9fa3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7d99f9a48904442d885d1a21fd26e0fb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/305f3345834d468c864d935f0c309bf7] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=35.2 K 2024-12-11T02:27:10,505 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:10,505 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/B is initiating minor compaction (all files) 2024-12-11T02:27:10,505 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/B in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:10,505 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f8e5b70e3cc4c05999f7c0ed9bd9fa3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733884029031 2024-12-11T02:27:10,505 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e79c9b36940743f2878af881205a15c2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e96a4342a34544699afc39aedb1ff61b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c2b9dad8bf2544f08715a0d7e428ab0f] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=35.2 K 2024-12-11T02:27:10,506 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d99f9a48904442d885d1a21fd26e0fb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733884029048 2024-12-11T02:27:10,506 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e79c9b36940743f2878af881205a15c2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733884029031 2024-12-11T02:27:10,506 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 305f3345834d468c864d935f0c309bf7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884029400 2024-12-11T02:27:10,507 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] 
compactions.Compactor(224): Compacting e96a4342a34544699afc39aedb1ff61b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733884029048 2024-12-11T02:27:10,507 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting c2b9dad8bf2544f08715a0d7e428ab0f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884029400 2024-12-11T02:27:10,526 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#A#compaction#273 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:10,527 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#B#compaction#274 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:10,527 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/e886d2029cc748b5850b3e4342d6299a is 50, key is test_row_0/A:col10/1733884030008/Put/seqid=0 2024-12-11T02:27:10,527 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/86b8d561a0ee410d9f6295ce16e32bc4 is 50, key is test_row_0/B:col10/1733884030008/Put/seqid=0 2024-12-11T02:27:10,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742155_1331 (size=12104) 2024-12-11T02:27:10,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742156_1332 (size=12104) 2024-12-11T02:27:10,548 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/86b8d561a0ee410d9f6295ce16e32bc4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/86b8d561a0ee410d9f6295ce16e32bc4 2024-12-11T02:27:10,556 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/B of 66c347f1441760076f62fd1847fd01aa into 86b8d561a0ee410d9f6295ce16e32bc4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:10,556 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:10,556 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/B, priority=13, startTime=1733884030503; duration=0sec 2024-12-11T02:27:10,556 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:10,556 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:10,556 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:10,557 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:10,558 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/C is initiating minor compaction (all files) 2024-12-11T02:27:10,558 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/C in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:10,558 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/365a52b164ff4ddd8ee1585df763c161, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d94bc57e10a7419aa2d8f10e8256fe65, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a39ccfeaeb0341c0ae65e00ecfc96e3e] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=35.2 K 2024-12-11T02:27:10,558 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 365a52b164ff4ddd8ee1585df763c161, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733884029031 2024-12-11T02:27:10,558 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d94bc57e10a7419aa2d8f10e8256fe65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733884029048 2024-12-11T02:27:10,559 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a39ccfeaeb0341c0ae65e00ecfc96e3e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884029400 2024-12-11T02:27:10,566 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
66c347f1441760076f62fd1847fd01aa#C#compaction#275 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:10,566 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/0ed825a08f714fd086815657dda286cb is 50, key is test_row_0/C:col10/1733884030008/Put/seqid=0 2024-12-11T02:27:10,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742157_1333 (size=12104) 2024-12-11T02:27:10,588 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/0ed825a08f714fd086815657dda286cb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0ed825a08f714fd086815657dda286cb 2024-12-11T02:27:10,594 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/C of 66c347f1441760076f62fd1847fd01aa into 0ed825a08f714fd086815657dda286cb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:10,594 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:10,594 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/C, priority=13, startTime=1733884030504; duration=0sec 2024-12-11T02:27:10,594 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:10,594 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:10,597 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-11T02:27:10,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:10,598 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:27:10,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:10,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:10,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:10,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:10,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:10,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:10,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/1b9c467ab11a4bd1ba2632ff8da93195 is 50, key is test_row_0/A:col10/1733884030028/Put/seqid=0 2024-12-11T02:27:10,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742158_1334 (size=12001) 2024-12-11T02:27:10,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:10,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:10,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884090650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884090650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884090651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884090651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884090652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T02:27:10,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884090753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884090754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884090754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884090755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884090756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,939 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/e886d2029cc748b5850b3e4342d6299a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e886d2029cc748b5850b3e4342d6299a 2024-12-11T02:27:10,944 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/A of 66c347f1441760076f62fd1847fd01aa into e886d2029cc748b5850b3e4342d6299a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:10,945 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:10,945 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/A, priority=13, startTime=1733884030503; duration=0sec 2024-12-11T02:27:10,945 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:10,945 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:10,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884090957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884090958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884090958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884090958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:10,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:10,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884090959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,010 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/1b9c467ab11a4bd1ba2632ff8da93195 2024-12-11T02:27:11,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/5c571ccaba0141c787b4497e19e94d21 is 50, key is test_row_0/B:col10/1733884030028/Put/seqid=0 2024-12-11T02:27:11,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742159_1335 (size=12001) 2024-12-11T02:27:11,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T02:27:11,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884091259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884091260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884091260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884091261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884091263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,426 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/5c571ccaba0141c787b4497e19e94d21 2024-12-11T02:27:11,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/9d8967cb07154bd4b5430344a0e8ca51 is 50, key is test_row_0/C:col10/1733884030028/Put/seqid=0 2024-12-11T02:27:11,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742160_1336 (size=12001) 2024-12-11T02:27:11,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884091761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884091764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884091764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884091765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884091770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:11,839 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/9d8967cb07154bd4b5430344a0e8ca51 2024-12-11T02:27:11,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/1b9c467ab11a4bd1ba2632ff8da93195 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/1b9c467ab11a4bd1ba2632ff8da93195 2024-12-11T02:27:11,853 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/1b9c467ab11a4bd1ba2632ff8da93195, entries=150, sequenceid=76, filesize=11.7 K 2024-12-11T02:27:11,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/5c571ccaba0141c787b4497e19e94d21 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/5c571ccaba0141c787b4497e19e94d21 2024-12-11T02:27:11,858 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/5c571ccaba0141c787b4497e19e94d21, entries=150, sequenceid=76, filesize=11.7 K 2024-12-11T02:27:11,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/9d8967cb07154bd4b5430344a0e8ca51 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/9d8967cb07154bd4b5430344a0e8ca51 2024-12-11T02:27:11,864 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/9d8967cb07154bd4b5430344a0e8ca51, entries=150, sequenceid=76, filesize=11.7 K 2024-12-11T02:27:11,866 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 66c347f1441760076f62fd1847fd01aa in 1269ms, sequenceid=76, compaction requested=false 2024-12-11T02:27:11,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:11,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
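The pattern above repeats throughout this window: every Mutate call is rejected in HRegion.checkResources with RegionTooBusyException ("Over memstore limit=512.0 K") until the flush reported just above drains the region's memstore. Purely as an illustration, and not part of the TestAcidGuarantees source, a minimal Java sketch of the equivalent client-side handling is given below; the table, row, family and qualifier names are copied from the log, the retry count and backoff values are assumptions, and in a normal deployment the HBase client retries this condition internally rather than surfacing RegionTooBusyException to the caller.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Same shape of write as in the log: row test_row_0, family A, qualifier col10.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                     // assumed starting backoff, illustrative only
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);                   // rejected while the region is over its memstore limit
                    break;                            // accepted once the flush has drained the memstore
                } catch (RegionTooBusyException e) {
                    // Corresponds to the "Over memstore limit=512.0 K" rejections above:
                    // back off and retry so the region server gets time to finish flushing.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}

With the client's default retry settings left in place, the same back pressure would typically show up as added latency on Table.put rather than as an exception reaching application code.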
2024-12-11T02:27:11,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-11T02:27:11,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-11T02:27:11,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-11T02:27:11,870 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7340 sec 2024-12-11T02:27:11,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.7410 sec 2024-12-11T02:27:12,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T02:27:12,238 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-11T02:27:12,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:12,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-11T02:27:12,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-11T02:27:12,241 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:12,242 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:12,242 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:12,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-11T02:27:12,393 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-11T02:27:12,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
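For context on the procedure chain just recorded (FlushTableProcedure pid=69 fanning out FlushRegionProcedure pid=70): the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry indicates the flush is requested from the client side through the Admin API. A minimal sketch of such a request, assuming the standard HBase client API rather than the test's own helper code, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; in this build that is
            // recorded as a FlushTableProcedure (pid=69 here) which spawns one
            // FlushRegionProcedure per region (pid=70 here) on the region servers.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

Once the procedure completes, the client side logs the matching "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: ... completed" line, as seen above for procId 67.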
2024-12-11T02:27:12,394 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:27:12,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:12,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d6f3f08634b04b1a8771324c2164230c is 50, key is test_row_0/A:col10/1733884030644/Put/seqid=0 2024-12-11T02:27:12,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742161_1337 (size=12001) 2024-12-11T02:27:12,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-11T02:27:12,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:12,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:12,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884092784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884092785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884092786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884092787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884092788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,806 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d6f3f08634b04b1a8771324c2164230c 2024-12-11T02:27:12,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/30c0a4b0f08d4e82a565860a3452a83a is 50, key is test_row_0/B:col10/1733884030644/Put/seqid=0 2024-12-11T02:27:12,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742162_1338 (size=12001) 2024-12-11T02:27:12,818 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/30c0a4b0f08d4e82a565860a3452a83a 2024-12-11T02:27:12,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/5ad57efcf4df4a18bc1248f53982ebd0 is 50, key is 
test_row_0/C:col10/1733884030644/Put/seqid=0 2024-12-11T02:27:12,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742163_1339 (size=12001) 2024-12-11T02:27:12,829 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/5ad57efcf4df4a18bc1248f53982ebd0 2024-12-11T02:27:12,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d6f3f08634b04b1a8771324c2164230c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d6f3f08634b04b1a8771324c2164230c 2024-12-11T02:27:12,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-11T02:27:12,847 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d6f3f08634b04b1a8771324c2164230c, entries=150, sequenceid=89, filesize=11.7 K 2024-12-11T02:27:12,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/30c0a4b0f08d4e82a565860a3452a83a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/30c0a4b0f08d4e82a565860a3452a83a 2024-12-11T02:27:12,855 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/30c0a4b0f08d4e82a565860a3452a83a, entries=150, sequenceid=89, filesize=11.7 K 2024-12-11T02:27:12,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/5ad57efcf4df4a18bc1248f53982ebd0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5ad57efcf4df4a18bc1248f53982ebd0 2024-12-11T02:27:12,861 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5ad57efcf4df4a18bc1248f53982ebd0, entries=150, sequenceid=89, filesize=11.7 K 2024-12-11T02:27:12,862 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 66c347f1441760076f62fd1847fd01aa in 468ms, sequenceid=89, compaction requested=true 2024-12-11T02:27:12,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:12,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:12,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-11T02:27:12,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-11T02:27:12,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-11T02:27:12,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 622 msec 2024-12-11T02:27:12,867 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 627 msec 2024-12-11T02:27:12,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:12,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:27:12,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:12,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:12,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:12,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:12,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:12,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:12,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d30642754fe4449fbe8896a60e4c6fa1 is 50, key is test_row_0/A:col10/1733884032785/Put/seqid=0 2024-12-11T02:27:12,902 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884092897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884092897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884092901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884092903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:12,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884092902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:12,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742164_1340 (size=12001) 2024-12-11T02:27:13,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884093004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884093004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884093004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884093008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884093008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884093208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884093208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884093209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884093213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884093213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d30642754fe4449fbe8896a60e4c6fa1 2024-12-11T02:27:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-11T02:27:13,345 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-11T02:27:13,346 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-11T02:27:13,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-11T02:27:13,348 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:13,349 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:13,349 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:13,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/2006b3c464f44fc2be4a6f5573e3a1a3 is 50, key is test_row_0/B:col10/1733884032785/Put/seqid=0 2024-12-11T02:27:13,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742165_1341 (size=12001) 
2024-12-11T02:27:13,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-11T02:27:13,501 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-11T02:27:13,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:13,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:13,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:13,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884093512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884093512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884093514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884093515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884093519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-11T02:27:13,655 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-11T02:27:13,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:13,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:13,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:13,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/2006b3c464f44fc2be4a6f5573e3a1a3 2024-12-11T02:27:13,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/c15a0eae1fa54cd78f481c48e5d82bd7 is 50, key is test_row_0/C:col10/1733884032785/Put/seqid=0 2024-12-11T02:27:13,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742166_1342 (size=12001) 2024-12-11T02:27:13,808 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-11T02:27:13,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:13,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
as already flushing 2024-12-11T02:27:13,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:13,809 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-11T02:27:13,961 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:13,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-11T02:27:13,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:13,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:13,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:13,962 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:14,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884094015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:14,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:14,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884094018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884094018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:14,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884094018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:14,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:14,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884094032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:14,114 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:14,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-11T02:27:14,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:14,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:14,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:14,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:14,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:14,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:14,188 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/c15a0eae1fa54cd78f481c48e5d82bd7 2024-12-11T02:27:14,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d30642754fe4449fbe8896a60e4c6fa1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d30642754fe4449fbe8896a60e4c6fa1 2024-12-11T02:27:14,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d30642754fe4449fbe8896a60e4c6fa1, entries=150, sequenceid=115, filesize=11.7 K 2024-12-11T02:27:14,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/2006b3c464f44fc2be4a6f5573e3a1a3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/2006b3c464f44fc2be4a6f5573e3a1a3 2024-12-11T02:27:14,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/2006b3c464f44fc2be4a6f5573e3a1a3, entries=150, 
sequenceid=115, filesize=11.7 K 2024-12-11T02:27:14,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/c15a0eae1fa54cd78f481c48e5d82bd7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/c15a0eae1fa54cd78f481c48e5d82bd7 2024-12-11T02:27:14,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/c15a0eae1fa54cd78f481c48e5d82bd7, entries=150, sequenceid=115, filesize=11.7 K 2024-12-11T02:27:14,212 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 66c347f1441760076f62fd1847fd01aa in 1320ms, sequenceid=115, compaction requested=true 2024-12-11T02:27:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:14,212 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:14,212 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:14,214 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:14,214 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/A is initiating minor compaction (all files) 2024-12-11T02:27:14,214 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/A in 
TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:14,214 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e886d2029cc748b5850b3e4342d6299a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/1b9c467ab11a4bd1ba2632ff8da93195, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d6f3f08634b04b1a8771324c2164230c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d30642754fe4449fbe8896a60e4c6fa1] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=47.0 K 2024-12-11T02:27:14,214 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:14,214 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/B is initiating minor compaction (all files) 2024-12-11T02:27:14,214 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/B in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:14,214 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/86b8d561a0ee410d9f6295ce16e32bc4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/5c571ccaba0141c787b4497e19e94d21, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/30c0a4b0f08d4e82a565860a3452a83a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/2006b3c464f44fc2be4a6f5573e3a1a3] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=47.0 K 2024-12-11T02:27:14,215 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 86b8d561a0ee410d9f6295ce16e32bc4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884029400 2024-12-11T02:27:14,215 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting e886d2029cc748b5850b3e4342d6299a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884029400 2024-12-11T02:27:14,215 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c571ccaba0141c787b4497e19e94d21, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733884030028 2024-12-11T02:27:14,215 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b9c467ab11a4bd1ba2632ff8da93195, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733884030028 2024-12-11T02:27:14,216 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 30c0a4b0f08d4e82a565860a3452a83a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733884030644 2024-12-11T02:27:14,216 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6f3f08634b04b1a8771324c2164230c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733884030644 2024-12-11T02:27:14,216 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 2006b3c464f44fc2be4a6f5573e3a1a3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733884032785 2024-12-11T02:27:14,216 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d30642754fe4449fbe8896a60e4c6fa1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733884032785 2024-12-11T02:27:14,238 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#A#compaction#285 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:14,239 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d09c0ca0c952403b823682c5af30e313 is 50, key is test_row_0/A:col10/1733884032785/Put/seqid=0 2024-12-11T02:27:14,242 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#B#compaction#286 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:14,243 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e09ee8a501b14e2f8b03f9e82a039d1d is 50, key is test_row_0/B:col10/1733884032785/Put/seqid=0 2024-12-11T02:27:14,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742167_1343 (size=12241) 2024-12-11T02:27:14,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742168_1344 (size=12241) 2024-12-11T02:27:14,267 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:14,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-11T02:27:14,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:14,268 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-11T02:27:14,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:14,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:14,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:14,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:14,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:14,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:14,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/cc32ce7fc0a74492ae93b1fa746a6074 is 50, key is test_row_0/A:col10/1733884032897/Put/seqid=0 2024-12-11T02:27:14,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742169_1345 (size=12001) 2024-12-11T02:27:14,288 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/cc32ce7fc0a74492ae93b1fa746a6074 2024-12-11T02:27:14,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/d61ab90db53249cdb24806d910afc35b is 50, key is test_row_0/B:col10/1733884032897/Put/seqid=0 2024-12-11T02:27:14,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742170_1346 (size=12001) 2024-12-11T02:27:14,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-11T02:27:14,661 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d09c0ca0c952403b823682c5af30e313 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d09c0ca0c952403b823682c5af30e313 2024-12-11T02:27:14,662 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e09ee8a501b14e2f8b03f9e82a039d1d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e09ee8a501b14e2f8b03f9e82a039d1d 2024-12-11T02:27:14,668 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/A of 66c347f1441760076f62fd1847fd01aa into d09c0ca0c952403b823682c5af30e313(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:14,668 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:14,668 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/A, priority=12, startTime=1733884034212; duration=0sec 2024-12-11T02:27:14,669 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:14,669 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:14,669 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:14,670 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:14,670 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/B of 66c347f1441760076f62fd1847fd01aa into e09ee8a501b14e2f8b03f9e82a039d1d(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:14,670 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/C is initiating minor compaction (all files) 2024-12-11T02:27:14,670 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:14,670 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/B, priority=12, startTime=1733884034212; duration=0sec 2024-12-11T02:27:14,670 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/C in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:14,670 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:14,670 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:14,671 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0ed825a08f714fd086815657dda286cb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/9d8967cb07154bd4b5430344a0e8ca51, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5ad57efcf4df4a18bc1248f53982ebd0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/c15a0eae1fa54cd78f481c48e5d82bd7] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=47.0 K 2024-12-11T02:27:14,671 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ed825a08f714fd086815657dda286cb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884029400 2024-12-11T02:27:14,671 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d8967cb07154bd4b5430344a0e8ca51, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733884030028 2024-12-11T02:27:14,672 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ad57efcf4df4a18bc1248f53982ebd0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1733884030644 2024-12-11T02:27:14,672 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c15a0eae1fa54cd78f481c48e5d82bd7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733884032785 2024-12-11T02:27:14,681 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
66c347f1441760076f62fd1847fd01aa#C#compaction#289 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:14,682 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/0d204ed63cee40c8bb9562ff9472f14b is 50, key is test_row_0/C:col10/1733884032785/Put/seqid=0 2024-12-11T02:27:14,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742171_1347 (size=12241) 2024-12-11T02:27:14,719 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/d61ab90db53249cdb24806d910afc35b 2024-12-11T02:27:14,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/22fe1e1888574b8f986ce662608a1f77 is 50, key is test_row_0/C:col10/1733884032897/Put/seqid=0 2024-12-11T02:27:14,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742172_1348 (size=12001) 2024-12-11T02:27:14,732 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/22fe1e1888574b8f986ce662608a1f77 2024-12-11T02:27:14,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/cc32ce7fc0a74492ae93b1fa746a6074 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/cc32ce7fc0a74492ae93b1fa746a6074 2024-12-11T02:27:14,741 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/cc32ce7fc0a74492ae93b1fa746a6074, entries=150, sequenceid=125, filesize=11.7 K 2024-12-11T02:27:14,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/d61ab90db53249cdb24806d910afc35b as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/d61ab90db53249cdb24806d910afc35b 2024-12-11T02:27:14,746 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/d61ab90db53249cdb24806d910afc35b, entries=150, sequenceid=125, filesize=11.7 K 2024-12-11T02:27:14,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/22fe1e1888574b8f986ce662608a1f77 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/22fe1e1888574b8f986ce662608a1f77 2024-12-11T02:27:14,755 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/22fe1e1888574b8f986ce662608a1f77, entries=150, sequenceid=125, filesize=11.7 K 2024-12-11T02:27:14,756 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 66c347f1441760076f62fd1847fd01aa in 487ms, sequenceid=125, compaction requested=false 2024-12-11T02:27:14,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:14,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:14,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-11T02:27:14,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-11T02:27:14,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-11T02:27:14,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4080 sec 2024-12-11T02:27:14,761 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.4130 sec 2024-12-11T02:27:15,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:15,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:27:15,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:15,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:15,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:15,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:15,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:15,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:15,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/39788151991346e5b7f89be03312cf5e is 50, key is test_row_0/A:col10/1733884035028/Put/seqid=0 2024-12-11T02:27:15,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742173_1349 (size=16931) 2024-12-11T02:27:15,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884095054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884095055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884095055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884095056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884095057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,107 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/0d204ed63cee40c8bb9562ff9472f14b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0d204ed63cee40c8bb9562ff9472f14b 2024-12-11T02:27:15,112 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/C of 66c347f1441760076f62fd1847fd01aa into 0d204ed63cee40c8bb9562ff9472f14b(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
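
Note on the entries above: the repeated RegionTooBusyException warnings show writers being rejected once the region's memstore passes its blocking limit (512.0 K in this test) while the flush is still draining. As a hedged illustration only (this is not part of the test code, and the stock HBase client normally retries this exception internally before surfacing a failure), a caller-side retry of a Put could look like the sketch below; the row, family and qualifier names are taken from the log, while the connection settings, retry count and backoff values are assumptions.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);          // rejected while the region is blocked on memstore size
          break;                   // write accepted once the flush has freed memstore space
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs); // back off and let MemStoreFlusher make progress
          backoffMs *= 2;
        }
      }
    }
  }
}
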
2024-12-11T02:27:15,112 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:15,112 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/C, priority=12, startTime=1733884034212; duration=0sec 2024-12-11T02:27:15,112 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:15,112 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:15,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884095159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884095160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884095160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884095160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884095160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884095364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884095364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884095364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884095364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884095368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/39788151991346e5b7f89be03312cf5e 2024-12-11T02:27:15,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/c68be07b42724ed8b1390f1487eb39cb is 50, key is test_row_0/B:col10/1733884035028/Put/seqid=0 2024-12-11T02:27:15,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742174_1350 (size=12151) 2024-12-11T02:27:15,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-11T02:27:15,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/c68be07b42724ed8b1390f1487eb39cb 2024-12-11T02:27:15,452 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-11T02:27:15,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:15,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-11T02:27:15,457 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:15,457 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:15,458 INFO 
[PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:15,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-11T02:27:15,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/df3abf823be54bcfb5217d2684e10768 is 50, key is test_row_0/C:col10/1733884035028/Put/seqid=0 2024-12-11T02:27:15,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742175_1351 (size=12151) 2024-12-11T02:27:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-11T02:27:15,610 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-11T02:27:15,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:15,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:15,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:15,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
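
For context, the FlushTableProcedure/FlushRegionProcedure entries here (pid=73/74) are the master-side handling of an admin-requested flush; the region server answers "NOT flushing ... as already flushing" and the subprocedure is reported as failed and retried. A minimal sketch of issuing such a flush from a client, which is the same operation Thread-1446 later reports as "Operation: FLUSH", is shown below; the table name comes from the log, and the connection configuration is assumed to come from the default classpath hbase-site.xml.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Triggers the master-side FlushTableProcedure seen in the log (pid=71, pid=73).
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
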
2024-12-11T02:27:15,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:15,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:15,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884095667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884095667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884095669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884095669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:15,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884095669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-11T02:27:15,763 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-11T02:27:15,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:15,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:15,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:15,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:15,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:15,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:15,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/df3abf823be54bcfb5217d2684e10768 2024-12-11T02:27:15,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/39788151991346e5b7f89be03312cf5e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/39788151991346e5b7f89be03312cf5e 2024-12-11T02:27:15,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/39788151991346e5b7f89be03312cf5e, entries=250, sequenceid=138, filesize=16.5 K 2024-12-11T02:27:15,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/c68be07b42724ed8b1390f1487eb39cb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c68be07b42724ed8b1390f1487eb39cb 2024-12-11T02:27:15,883 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c68be07b42724ed8b1390f1487eb39cb, entries=150, sequenceid=138, filesize=11.9 K 2024-12-11T02:27:15,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/df3abf823be54bcfb5217d2684e10768 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/df3abf823be54bcfb5217d2684e10768 2024-12-11T02:27:15,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/df3abf823be54bcfb5217d2684e10768, entries=150, sequenceid=138, filesize=11.9 K 2024-12-11T02:27:15,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 66c347f1441760076f62fd1847fd01aa in 859ms, sequenceid=138, compaction requested=true 2024-12-11T02:27:15,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:15,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:15,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:15,889 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:15,889 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:15,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:15,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:15,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:15,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:15,890 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41173 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:15,890 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:15,890 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/A is initiating minor compaction (all files) 2024-12-11T02:27:15,890 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/B is initiating minor compaction (all files) 2024-12-11T02:27:15,890 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/A in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:15,890 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/B in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:15,891 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d09c0ca0c952403b823682c5af30e313, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/cc32ce7fc0a74492ae93b1fa746a6074, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/39788151991346e5b7f89be03312cf5e] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=40.2 K 2024-12-11T02:27:15,891 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e09ee8a501b14e2f8b03f9e82a039d1d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/d61ab90db53249cdb24806d910afc35b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c68be07b42724ed8b1390f1487eb39cb] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=35.5 K 2024-12-11T02:27:15,891 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d09c0ca0c952403b823682c5af30e313, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733884032785 2024-12-11T02:27:15,891 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e09ee8a501b14e2f8b03f9e82a039d1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733884032785 2024-12-11T02:27:15,891 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc32ce7fc0a74492ae93b1fa746a6074, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1733884032897 2024-12-11T02:27:15,891 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d61ab90db53249cdb24806d910afc35b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1733884032897 2024-12-11T02:27:15,892 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39788151991346e5b7f89be03312cf5e, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733884035023 2024-12-11T02:27:15,892 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting c68be07b42724ed8b1390f1487eb39cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733884035028 2024-12-11T02:27:15,899 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#A#compaction#294 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:15,899 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/c50d986f104341198cb7a6ed8d9e6e92 is 50, key is test_row_0/A:col10/1733884035028/Put/seqid=0 2024-12-11T02:27:15,902 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#B#compaction#295 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:15,902 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/b4a75cc90b7d40249af55c6647921db6 is 50, key is test_row_0/B:col10/1733884035028/Put/seqid=0 2024-12-11T02:27:15,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742176_1352 (size=12493) 2024-12-11T02:27:15,916 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:15,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-11T02:27:15,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
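The FlushRegionCallable work item above (pid=74, event_type=RS_FLUSH_REGIONS) is the region-server side of a procedure-driven table flush; later entries in this log show the client request ("flush TestAcidGuarantees") and its FlushTableProcedure completing. A minimal sketch of the corresponding client call, assuming only the table name from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; the master schedules a
      // FlushTableProcedure with one FlushRegionProcedure per region, which is
      // what drives the RS_FLUSH_REGIONS work seen in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}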
2024-12-11T02:27:15,917 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:27:15,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:15,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:15,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:15,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:15,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:15,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:15,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/0428800973af4e86a22e422362206e25 is 50, key is test_row_0/A:col10/1733884035053/Put/seqid=0 2024-12-11T02:27:15,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742177_1353 (size=12493) 2024-12-11T02:27:15,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742178_1354 (size=12151) 2024-12-11T02:27:15,947 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/0428800973af4e86a22e422362206e25 2024-12-11T02:27:15,949 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/b4a75cc90b7d40249af55c6647921db6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/b4a75cc90b7d40249af55c6647921db6 2024-12-11T02:27:15,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/3dda7ab457094d30b633413d56b55dd8 is 50, key is test_row_0/B:col10/1733884035053/Put/seqid=0 2024-12-11T02:27:15,956 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/B of 66c347f1441760076f62fd1847fd01aa into b4a75cc90b7d40249af55c6647921db6(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:15,956 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:15,956 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/B, priority=13, startTime=1733884035889; duration=0sec 2024-12-11T02:27:15,956 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:15,956 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:15,956 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:15,957 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:15,957 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/C is initiating minor compaction (all files) 2024-12-11T02:27:15,957 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/C in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
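The "CompactingMemStore ... FLUSHING TO DISK" entries for stores A, B and C indicate that the table's column families use the in-memory compaction pipeline rather than the default memstore. As a hedged sketch only (this is not the test's own setup code), such a table could be declared with the HBase 2.x descriptor builders; the BASIC policy and family names are illustrative:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Assumes an open Admin handle named "admin" (see the earlier sketches).
void createTableWithInMemoryCompaction(Admin admin) throws java.io.IOException {
  TableDescriptorBuilder tdb =
      TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
  for (String family : new String[] {"A", "B", "C"}) {
    tdb.setColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
            // Route writes through a CompactingMemStore pipeline, as in this log.
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build());
  }
  admin.createTable(tdb.build());
}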
2024-12-11T02:27:15,958 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0d204ed63cee40c8bb9562ff9472f14b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/22fe1e1888574b8f986ce662608a1f77, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/df3abf823be54bcfb5217d2684e10768] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=35.5 K 2024-12-11T02:27:15,958 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d204ed63cee40c8bb9562ff9472f14b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733884032785 2024-12-11T02:27:15,958 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 22fe1e1888574b8f986ce662608a1f77, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1733884032897 2024-12-11T02:27:15,959 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting df3abf823be54bcfb5217d2684e10768, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733884035028 2024-12-11T02:27:15,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742179_1355 (size=12151) 2024-12-11T02:27:15,966 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/3dda7ab457094d30b633413d56b55dd8 2024-12-11T02:27:15,973 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#C#compaction#298 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:15,979 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/663cfda180a4427da7d232336d030690 is 50, key is test_row_0/C:col10/1733884035028/Put/seqid=0 2024-12-11T02:27:15,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/7736b344b72a4b8fba1d182db73e29f0 is 50, key is test_row_0/C:col10/1733884035053/Put/seqid=0 2024-12-11T02:27:15,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742180_1356 (size=12493) 2024-12-11T02:27:15,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742181_1357 (size=12151) 2024-12-11T02:27:16,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-11T02:27:16,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:16,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:16,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884096177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884096177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884096180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884096180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884096181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884096281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884096281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884096283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884096284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884096284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,324 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/c50d986f104341198cb7a6ed8d9e6e92 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/c50d986f104341198cb7a6ed8d9e6e92 2024-12-11T02:27:16,329 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/A of 66c347f1441760076f62fd1847fd01aa into c50d986f104341198cb7a6ed8d9e6e92(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
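The repeated RegionTooBusyException warnings above come from HRegion.checkResources(): once a region's memstore exceeds its blocking limit (512 K in this test, which uses a deliberately small flush size), new mutations are rejected until the flush catches up, and the client retries with backoff. A hedged configuration sketch; the numeric values are illustrative, not taken from this test run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Server side: writes block once a region's memstore size exceeds roughly
    // flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // illustrative 128 MB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // illustrative multiplier
    // Client side: RegionTooBusyException is a retriable IOException; these knobs
    // bound how many times and how often a writer retries before the mutate fails.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100); // ms between retries
  }
}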
2024-12-11T02:27:16,329 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:16,329 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/A, priority=13, startTime=1733884035889; duration=0sec 2024-12-11T02:27:16,329 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:16,329 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:16,393 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/663cfda180a4427da7d232336d030690 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/663cfda180a4427da7d232336d030690 2024-12-11T02:27:16,395 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/7736b344b72a4b8fba1d182db73e29f0 2024-12-11T02:27:16,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/0428800973af4e86a22e422362206e25 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0428800973af4e86a22e422362206e25 2024-12-11T02:27:16,400 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/C of 66c347f1441760076f62fd1847fd01aa into 663cfda180a4427da7d232336d030690(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
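The files flushed and compacted above hold cells whose keys are printed earlier by HFileWriterImpl (row "test_row_0", families A/B/C, qualifier "col10"). A minimal sketch of the kind of client write that produces such cells; the row, family and qualifier names come from this log, while the values and class name are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One row touching all three column families, mirroring the cell keys above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value-A"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value-B"));
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("value-C"));
      table.put(put);
    }
  }
}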
2024-12-11T02:27:16,400 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:16,400 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/C, priority=13, startTime=1733884035889; duration=0sec 2024-12-11T02:27:16,400 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:16,400 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:16,404 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0428800973af4e86a22e422362206e25, entries=150, sequenceid=164, filesize=11.9 K 2024-12-11T02:27:16,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/3dda7ab457094d30b633413d56b55dd8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/3dda7ab457094d30b633413d56b55dd8 2024-12-11T02:27:16,409 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/3dda7ab457094d30b633413d56b55dd8, entries=150, sequenceid=164, filesize=11.9 K 2024-12-11T02:27:16,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/7736b344b72a4b8fba1d182db73e29f0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7736b344b72a4b8fba1d182db73e29f0 2024-12-11T02:27:16,414 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7736b344b72a4b8fba1d182db73e29f0, entries=150, sequenceid=164, filesize=11.9 K 2024-12-11T02:27:16,416 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 66c347f1441760076f62fd1847fd01aa in 498ms, sequenceid=164, compaction requested=false 2024-12-11T02:27:16,416 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:16,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:16,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-11T02:27:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-11T02:27:16,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-11T02:27:16,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 959 msec 2024-12-11T02:27:16,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 965 msec 2024-12-11T02:27:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:16,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T02:27:16,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:16,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:16,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:16,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:16,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:16,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:16,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/7ac737be3c1b4f32a6a56055c04ee574 is 50, key is test_row_0/A:col10/1733884036175/Put/seqid=0 2024-12-11T02:27:16,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884096502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884096503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884096504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884096505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884096506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742182_1358 (size=12151) 2024-12-11T02:27:16,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-11T02:27:16,565 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-11T02:27:16,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:16,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-11T02:27:16,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T02:27:16,570 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:16,571 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:16,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:16,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884096607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884096608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884096608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884096609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884096609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T02:27:16,723 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T02:27:16,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:16,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:16,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:16,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:16,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:16,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:16,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884096811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884096812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884096812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884096813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:16,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884096813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T02:27:16,875 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:16,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T02:27:16,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:16,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:16,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:16,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:16,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:16,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:16,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/7ac737be3c1b4f32a6a56055c04ee574 2024-12-11T02:27:16,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/48db5db58b654132a1bc0b5ff54c6ae2 is 50, key is test_row_0/B:col10/1733884036175/Put/seqid=0 2024-12-11T02:27:16,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742183_1359 (size=12151) 2024-12-11T02:27:17,028 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T02:27:17,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:17,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:17,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:17,029 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:17,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:17,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884097116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884097117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884097117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884097117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884097117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T02:27:17,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T02:27:17,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:17,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:17,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:17,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:17,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:17,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:17,330 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/48db5db58b654132a1bc0b5ff54c6ae2 2024-12-11T02:27:17,334 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T02:27:17,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:17,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:17,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:17,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:17,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:17,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:17,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/8acf02d41c07464f8f352543f9afa2db is 50, key is test_row_0/C:col10/1733884036175/Put/seqid=0 2024-12-11T02:27:17,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742184_1360 (size=12151) 2024-12-11T02:27:17,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/8acf02d41c07464f8f352543f9afa2db 2024-12-11T02:27:17,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/7ac737be3c1b4f32a6a56055c04ee574 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7ac737be3c1b4f32a6a56055c04ee574 2024-12-11T02:27:17,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7ac737be3c1b4f32a6a56055c04ee574, entries=150, sequenceid=180, filesize=11.9 K 2024-12-11T02:27:17,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/48db5db58b654132a1bc0b5ff54c6ae2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/48db5db58b654132a1bc0b5ff54c6ae2 2024-12-11T02:27:17,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/48db5db58b654132a1bc0b5ff54c6ae2, entries=150, sequenceid=180, filesize=11.9 K 2024-12-11T02:27:17,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/8acf02d41c07464f8f352543f9afa2db as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/8acf02d41c07464f8f352543f9afa2db 2024-12-11T02:27:17,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/8acf02d41c07464f8f352543f9afa2db, entries=150, sequenceid=180, filesize=11.9 K 2024-12-11T02:27:17,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 66c347f1441760076f62fd1847fd01aa in 895ms, sequenceid=180, 
compaction requested=true 2024-12-11T02:27:17,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:17,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:17,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:17,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:17,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:17,380 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:17,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:17,380 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:17,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:17,382 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:17,382 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/A is initiating minor compaction (all files) 2024-12-11T02:27:17,382 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/A in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:17,382 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/c50d986f104341198cb7a6ed8d9e6e92, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0428800973af4e86a22e422362206e25, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7ac737be3c1b4f32a6a56055c04ee574] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=35.9 K 2024-12-11T02:27:17,383 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c50d986f104341198cb7a6ed8d9e6e92, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733884035028 2024-12-11T02:27:17,383 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:17,383 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/B is initiating minor compaction (all files) 2024-12-11T02:27:17,383 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/B in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:17,383 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/b4a75cc90b7d40249af55c6647921db6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/3dda7ab457094d30b633413d56b55dd8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/48db5db58b654132a1bc0b5ff54c6ae2] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=35.9 K 2024-12-11T02:27:17,383 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0428800973af4e86a22e422362206e25, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733884035053 2024-12-11T02:27:17,384 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ac737be3c1b4f32a6a56055c04ee574, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733884036175 2024-12-11T02:27:17,384 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b4a75cc90b7d40249af55c6647921db6, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733884035028 2024-12-11T02:27:17,384 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dda7ab457094d30b633413d56b55dd8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733884035053 2024-12-11T02:27:17,385 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 48db5db58b654132a1bc0b5ff54c6ae2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733884036175 2024-12-11T02:27:17,395 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#A#compaction#303 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:17,396 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d5324b391ed0411b815277f06e0f1847 is 50, key is test_row_0/A:col10/1733884036175/Put/seqid=0 2024-12-11T02:27:17,402 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#B#compaction#304 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:17,402 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/24d519961f394f6ab7a12f5b44fbe394 is 50, key is test_row_0/B:col10/1733884036175/Put/seqid=0 2024-12-11T02:27:17,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742186_1362 (size=12595) 2024-12-11T02:27:17,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742185_1361 (size=12595) 2024-12-11T02:27:17,416 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/24d519961f394f6ab7a12f5b44fbe394 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/24d519961f394f6ab7a12f5b44fbe394 2024-12-11T02:27:17,421 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/B of 66c347f1441760076f62fd1847fd01aa into 24d519961f394f6ab7a12f5b44fbe394(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:17,421 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:17,421 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/B, priority=13, startTime=1733884037380; duration=0sec 2024-12-11T02:27:17,421 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:17,421 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:17,421 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:17,422 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:17,422 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/C is initiating minor compaction (all files) 2024-12-11T02:27:17,423 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/C in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:17,423 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/663cfda180a4427da7d232336d030690, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7736b344b72a4b8fba1d182db73e29f0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/8acf02d41c07464f8f352543f9afa2db] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=35.9 K 2024-12-11T02:27:17,423 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 663cfda180a4427da7d232336d030690, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733884035028 2024-12-11T02:27:17,423 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 7736b344b72a4b8fba1d182db73e29f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733884035053 2024-12-11T02:27:17,424 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 8acf02d41c07464f8f352543f9afa2db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733884036175 2024-12-11T02:27:17,431 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#C#compaction#305 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:17,432 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/d9cb4d56e0e14a8bb0c375df497a42d9 is 50, key is test_row_0/C:col10/1733884036175/Put/seqid=0 2024-12-11T02:27:17,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742187_1363 (size=12595) 2024-12-11T02:27:17,446 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/d9cb4d56e0e14a8bb0c375df497a42d9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d9cb4d56e0e14a8bb0c375df497a42d9 2024-12-11T02:27:17,452 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/C of 66c347f1441760076f62fd1847fd01aa into d9cb4d56e0e14a8bb0c375df497a42d9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:17,452 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:17,452 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/C, priority=13, startTime=1733884037380; duration=0sec 2024-12-11T02:27:17,452 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:17,452 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:17,489 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T02:27:17,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:17,490 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:27:17,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:17,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:17,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:17,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:17,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:17,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:17,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/9b067195454349f2a33a0b958531b056 is 50, key is test_row_0/A:col10/1733884036502/Put/seqid=0 2024-12-11T02:27:17,499 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742188_1364 (size=12151) 2024-12-11T02:27:17,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:17,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:17,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884097626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884097626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884097629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884097629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884097629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T02:27:17,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884097730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884097730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884097734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884097734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884097734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,819 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/d5324b391ed0411b815277f06e0f1847 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d5324b391ed0411b815277f06e0f1847 2024-12-11T02:27:17,828 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/A of 66c347f1441760076f62fd1847fd01aa into d5324b391ed0411b815277f06e0f1847(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:17,828 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:17,828 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/A, priority=13, startTime=1733884037380; duration=0sec 2024-12-11T02:27:17,829 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:17,829 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:17,900 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/9b067195454349f2a33a0b958531b056 2024-12-11T02:27:17,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/8c72b2cc502b4b98ab653404c4e41d92 is 50, key is test_row_0/B:col10/1733884036502/Put/seqid=0 2024-12-11T02:27:17,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884097933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884097933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884097937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884097937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:17,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884097937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:17,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742189_1365 (size=12151) 2024-12-11T02:27:17,940 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/8c72b2cc502b4b98ab653404c4e41d92 2024-12-11T02:27:17,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/cd0445619e9846289efffbd135f643d0 is 50, key is test_row_0/C:col10/1733884036502/Put/seqid=0 2024-12-11T02:27:17,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742190_1366 (size=12151) 2024-12-11T02:27:18,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884098235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884098237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884098239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884098240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884098241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,365 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/cd0445619e9846289efffbd135f643d0 2024-12-11T02:27:18,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/9b067195454349f2a33a0b958531b056 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/9b067195454349f2a33a0b958531b056 2024-12-11T02:27:18,374 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/9b067195454349f2a33a0b958531b056, entries=150, sequenceid=205, filesize=11.9 K 2024-12-11T02:27:18,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/8c72b2cc502b4b98ab653404c4e41d92 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8c72b2cc502b4b98ab653404c4e41d92 2024-12-11T02:27:18,380 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8c72b2cc502b4b98ab653404c4e41d92, entries=150, sequenceid=205, filesize=11.9 K 2024-12-11T02:27:18,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/cd0445619e9846289efffbd135f643d0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cd0445619e9846289efffbd135f643d0 2024-12-11T02:27:18,384 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cd0445619e9846289efffbd135f643d0, entries=150, sequenceid=205, filesize=11.9 K 2024-12-11T02:27:18,385 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 66c347f1441760076f62fd1847fd01aa in 896ms, sequenceid=205, compaction requested=false 2024-12-11T02:27:18,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:18,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:18,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-11T02:27:18,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-11T02:27:18,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-11T02:27:18,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8150 sec 2024-12-11T02:27:18,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.8220 sec 2024-12-11T02:27:18,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T02:27:18,673 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-11T02:27:18,674 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:18,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-11T02:27:18,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T02:27:18,676 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:18,676 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:18,676 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:18,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:18,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T02:27:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:18,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:18,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/791a82f07c27485c99ee86cc9d731954 is 50, key is test_row_0/A:col10/1733884037627/Put/seqid=0 2024-12-11T02:27:18,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884098766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884098766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884098766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884098767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742191_1367 (size=12151) 2024-12-11T02:27:18,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884098767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/791a82f07c27485c99ee86cc9d731954 2024-12-11T02:27:18,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T02:27:18,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/701b92f833284c64a273d0ecbd4e6503 is 50, key is test_row_0/B:col10/1733884037627/Put/seqid=0 2024-12-11T02:27:18,827 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-11T02:27:18,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
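The repeated RegionTooBusyException warnings above all come from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its blocking limit of 512.0 K. That limit is normally the configured flush size times the block multiplier (hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier), so this test presumably runs with a deliberately small flush size to force the condition. The sketch below is a hypothetical client-side illustration only, not part of the test: in practice the HBase client retries RegionTooBusyException internally, but the loop makes the backoff explicit.

```java
// Hypothetical illustration (assumed writer, not from the logged test):
// back off when a put is rejected because the region's memstore is over
// the blocking limit reported as "Over memstore limit=512.0 K" above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);          // may be rejected while the memstore is over the limit
          break;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs); // give the in-progress flush time to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5000);
        }
      }
    }
  }
}
```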
2024-12-11T02:27:18,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:18,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:18,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:18,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
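The pid=78 failure above shows the flush-procedure retry path: the master's FlushTableProcedure (pid=77) dispatched a FlushRegionProcedure to the region server, but the region was already being flushed by MemStoreFlusher ("NOT flushing ... as already flushing"), so FlushRegionCallable threw "Unable to complete flush", the master logged "Remote procedure failed, pid=78", and the dispatcher keeps re-sending the callable until the in-flight flush completes. A minimal client-side sketch of the call that creates such a procedure is shown below; the table name is taken from the log, everything else is assumed.

```java
// Hypothetical sketch of the admin call behind "Client=jenkins ... flush TestAcidGuarantees":
// Admin.flush submits a table flush and waits for the master-side procedure to finish,
// matching the "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed" entries.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table; returns once the procedure completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```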
2024-12-11T02:27:18,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742192_1368 (size=12151) 2024-12-11T02:27:18,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/701b92f833284c64a273d0ecbd4e6503 2024-12-11T02:27:18,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/386c09bd96f64f41a5d19f779f683f34 is 50, key is test_row_0/C:col10/1733884037627/Put/seqid=0 2024-12-11T02:27:18,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742193_1369 (size=12151) 2024-12-11T02:27:18,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884098872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884098872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884098872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884098873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:18,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884098874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T02:27:18,980 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:18,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-11T02:27:18,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:18,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:18,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:18,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:18,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884099074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884099075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884099075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884099076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884099077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,133 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-11T02:27:19,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:19,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:19,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/386c09bd96f64f41a5d19f779f683f34 2024-12-11T02:27:19,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/791a82f07c27485c99ee86cc9d731954 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/791a82f07c27485c99ee86cc9d731954 2024-12-11T02:27:19,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T02:27:19,279 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/791a82f07c27485c99ee86cc9d731954, entries=150, sequenceid=221, filesize=11.9 K 2024-12-11T02:27:19,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/701b92f833284c64a273d0ecbd4e6503 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/701b92f833284c64a273d0ecbd4e6503 2024-12-11T02:27:19,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/701b92f833284c64a273d0ecbd4e6503, entries=150, sequenceid=221, filesize=11.9 K 2024-12-11T02:27:19,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/386c09bd96f64f41a5d19f779f683f34 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/386c09bd96f64f41a5d19f779f683f34 2024-12-11T02:27:19,289 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/386c09bd96f64f41a5d19f779f683f34, entries=150, sequenceid=221, filesize=11.9 K 2024-12-11T02:27:19,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 
2024-12-11T02:27:19,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:19,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,290 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:19,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:19,291 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 66c347f1441760076f62fd1847fd01aa in 551ms, sequenceid=221, compaction requested=true 2024-12-11T02:27:19,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:19,292 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:19,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:19,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:19,292 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:19,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:19,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:19,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:19,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:19,293 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:19,293 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/A is initiating minor compaction (all files) 2024-12-11T02:27:19,293 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/A in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:19,293 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d5324b391ed0411b815277f06e0f1847, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/9b067195454349f2a33a0b958531b056, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/791a82f07c27485c99ee86cc9d731954] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=36.0 K 2024-12-11T02:27:19,293 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:19,293 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/B is initiating minor compaction (all files) 2024-12-11T02:27:19,293 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/B in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,294 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5324b391ed0411b815277f06e0f1847, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733884036175 2024-12-11T02:27:19,294 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/24d519961f394f6ab7a12f5b44fbe394, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8c72b2cc502b4b98ab653404c4e41d92, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/701b92f833284c64a273d0ecbd4e6503] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=36.0 K 2024-12-11T02:27:19,294 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 24d519961f394f6ab7a12f5b44fbe394, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733884036175 2024-12-11T02:27:19,294 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b067195454349f2a33a0b958531b056, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1733884036500 2024-12-11T02:27:19,294 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c72b2cc502b4b98ab653404c4e41d92, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1733884036500 2024-12-11T02:27:19,294 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 791a82f07c27485c99ee86cc9d731954, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733884037627 2024-12-11T02:27:19,295 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 701b92f833284c64a273d0ecbd4e6503, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733884037627 2024-12-11T02:27:19,302 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#A#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:19,303 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/87a87b71737240b2aa35b2b8f038ed82 is 50, key is test_row_0/A:col10/1733884037627/Put/seqid=0 2024-12-11T02:27:19,308 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#B#compaction#313 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:19,309 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/994766761bfa4a1bbc572bd25e8eea11 is 50, key is test_row_0/B:col10/1733884037627/Put/seqid=0 2024-12-11T02:27:19,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742194_1370 (size=12697) 2024-12-11T02:27:19,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742195_1371 (size=12697) 2024-12-11T02:27:19,331 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/994766761bfa4a1bbc572bd25e8eea11 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/994766761bfa4a1bbc572bd25e8eea11 2024-12-11T02:27:19,337 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/B of 66c347f1441760076f62fd1847fd01aa into 994766761bfa4a1bbc572bd25e8eea11(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:19,337 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:19,337 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/B, priority=13, startTime=1733884039292; duration=0sec 2024-12-11T02:27:19,337 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:19,338 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:19,338 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:19,339 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:19,339 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/C is initiating minor compaction (all files) 2024-12-11T02:27:19,339 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/C in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,339 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d9cb4d56e0e14a8bb0c375df497a42d9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cd0445619e9846289efffbd135f643d0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/386c09bd96f64f41a5d19f779f683f34] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=36.0 K 2024-12-11T02:27:19,340 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d9cb4d56e0e14a8bb0c375df497a42d9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733884036175 2024-12-11T02:27:19,340 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting cd0445619e9846289efffbd135f643d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1733884036500 2024-12-11T02:27:19,340 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 386c09bd96f64f41a5d19f779f683f34, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733884037627 2024-12-11T02:27:19,349 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
66c347f1441760076f62fd1847fd01aa#C#compaction#314 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:19,350 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/cbec665dc03545fb96bf2d200aefa643 is 50, key is test_row_0/C:col10/1733884037627/Put/seqid=0 2024-12-11T02:27:19,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742196_1372 (size=12697) 2024-12-11T02:27:19,370 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/cbec665dc03545fb96bf2d200aefa643 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cbec665dc03545fb96bf2d200aefa643 2024-12-11T02:27:19,377 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/C of 66c347f1441760076f62fd1847fd01aa into cbec665dc03545fb96bf2d200aefa643(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:19,377 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:19,378 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/C, priority=13, startTime=1733884039292; duration=0sec 2024-12-11T02:27:19,378 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:19,378 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:19,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:19,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:27:19,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:19,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:19,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:19,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:19,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:19,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:19,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/22dc6e290eff46b6b9ae868d865a71c0 is 50, key is test_row_0/A:col10/1733884039383/Put/seqid=0 2024-12-11T02:27:19,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884099391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884099392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884099393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884099393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884099393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742197_1373 (size=12151) 2024-12-11T02:27:19,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/22dc6e290eff46b6b9ae868d865a71c0 2024-12-11T02:27:19,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/53e2bedcf6154fedb3fc6cd1c8849e6c is 50, key is test_row_0/B:col10/1733884039383/Put/seqid=0 2024-12-11T02:27:19,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742198_1374 (size=12151) 2024-12-11T02:27:19,444 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-11T02:27:19,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:19,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:19,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884099496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884099497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884099497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884099497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884099497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-11T02:27:19,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:19,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884099700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884099700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884099700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884099700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:19,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884099701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,727 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/87a87b71737240b2aa35b2b8f038ed82 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/87a87b71737240b2aa35b2b8f038ed82 2024-12-11T02:27:19,731 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/A of 66c347f1441760076f62fd1847fd01aa into 87a87b71737240b2aa35b2b8f038ed82(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:19,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:19,731 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/A, priority=13, startTime=1733884039292; duration=0sec 2024-12-11T02:27:19,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:19,731 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:19,749 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-11T02:27:19,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:19,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:19,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:19,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T02:27:19,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/53e2bedcf6154fedb3fc6cd1c8849e6c 2024-12-11T02:27:19,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/88907d7fdb024ab29b8b9e099af16535 is 50, key is test_row_0/C:col10/1733884039383/Put/seqid=0 2024-12-11T02:27:19,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742199_1375 (size=12151) 2024-12-11T02:27:19,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/88907d7fdb024ab29b8b9e099af16535 2024-12-11T02:27:19,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/22dc6e290eff46b6b9ae868d865a71c0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/22dc6e290eff46b6b9ae868d865a71c0 2024-12-11T02:27:19,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/22dc6e290eff46b6b9ae868d865a71c0, entries=150, sequenceid=248, filesize=11.9 K 2024-12-11T02:27:19,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/53e2bedcf6154fedb3fc6cd1c8849e6c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/53e2bedcf6154fedb3fc6cd1c8849e6c 2024-12-11T02:27:19,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/53e2bedcf6154fedb3fc6cd1c8849e6c, entries=150, sequenceid=248, filesize=11.9 K 2024-12-11T02:27:19,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/88907d7fdb024ab29b8b9e099af16535 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/88907d7fdb024ab29b8b9e099af16535 2024-12-11T02:27:19,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/88907d7fdb024ab29b8b9e099af16535, entries=150, sequenceid=248, filesize=11.9 K 2024-12-11T02:27:19,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 66c347f1441760076f62fd1847fd01aa in 485ms, sequenceid=248, compaction requested=false 2024-12-11T02:27:19,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:19,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:19,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-11T02:27:19,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:19,903 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:27:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:19,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:19,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/775f36a64c0d4b50956e98496e5f46b2 is 50, key is test_row_0/A:col10/1733884039390/Put/seqid=0 2024-12-11T02:27:19,915 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742200_1376 (size=12251) 2024-12-11T02:27:19,918 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/775f36a64c0d4b50956e98496e5f46b2 2024-12-11T02:27:19,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/8dcfb018db3d46bc97fae55ee15ace87 is 50, key is test_row_0/B:col10/1733884039390/Put/seqid=0 2024-12-11T02:27:19,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742201_1377 (size=12251) 2024-12-11T02:27:19,941 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/8dcfb018db3d46bc97fae55ee15ace87 2024-12-11T02:27:19,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/a6ca37c4ca2d4805af24454dd196595e is 50, key is test_row_0/C:col10/1733884039390/Put/seqid=0 2024-12-11T02:27:19,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742202_1378 (size=12251) 2024-12-11T02:27:19,962 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/a6ca37c4ca2d4805af24454dd196595e 2024-12-11T02:27:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/775f36a64c0d4b50956e98496e5f46b2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/775f36a64c0d4b50956e98496e5f46b2 2024-12-11T02:27:19,971 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/775f36a64c0d4b50956e98496e5f46b2, entries=150, sequenceid=260, filesize=12.0 K 
2024-12-11T02:27:19,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/8dcfb018db3d46bc97fae55ee15ace87 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8dcfb018db3d46bc97fae55ee15ace87 2024-12-11T02:27:19,976 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8dcfb018db3d46bc97fae55ee15ace87, entries=150, sequenceid=260, filesize=12.0 K 2024-12-11T02:27:19,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/a6ca37c4ca2d4805af24454dd196595e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a6ca37c4ca2d4805af24454dd196595e 2024-12-11T02:27:19,982 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a6ca37c4ca2d4805af24454dd196595e, entries=150, sequenceid=260, filesize=12.0 K 2024-12-11T02:27:19,983 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 66c347f1441760076f62fd1847fd01aa in 80ms, sequenceid=260, compaction requested=true 2024-12-11T02:27:19,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:19,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:19,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-11T02:27:19,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-11T02:27:19,986 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-11T02:27:19,986 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3080 sec 2024-12-11T02:27:19,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.3130 sec 2024-12-11T02:27:20,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:20,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:27:20,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:20,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:20,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:20,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:20,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:20,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:20,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/e4f35ebb94ee48298f1e28121c774977 is 50, key is test_row_0/A:col10/1733884040006/Put/seqid=0 2024-12-11T02:27:20,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742203_1379 (size=12301) 2024-12-11T02:27:20,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/e4f35ebb94ee48298f1e28121c774977 2024-12-11T02:27:20,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/1182a9aa003946ed82998aee08db6638 is 50, key is test_row_0/B:col10/1733884040006/Put/seqid=0 2024-12-11T02:27:20,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742204_1380 
(size=12301) 2024-12-11T02:27:20,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/1182a9aa003946ed82998aee08db6638 2024-12-11T02:27:20,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884100029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884100030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884100032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884100032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884100033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/fe7e1eab397b410298434a716ed1f135 is 50, key is test_row_0/C:col10/1733884040006/Put/seqid=0 2024-12-11T02:27:20,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742205_1381 (size=12301) 2024-12-11T02:27:20,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884100134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884100134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884100136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884100136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884100137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884100336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884100338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884100338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884100339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884100341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,450 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/fe7e1eab397b410298434a716ed1f135 2024-12-11T02:27:20,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/e4f35ebb94ee48298f1e28121c774977 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e4f35ebb94ee48298f1e28121c774977 2024-12-11T02:27:20,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e4f35ebb94ee48298f1e28121c774977, entries=150, sequenceid=272, filesize=12.0 K 2024-12-11T02:27:20,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/1182a9aa003946ed82998aee08db6638 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/1182a9aa003946ed82998aee08db6638 2024-12-11T02:27:20,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/1182a9aa003946ed82998aee08db6638, entries=150, sequenceid=272, filesize=12.0 K 2024-12-11T02:27:20,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/fe7e1eab397b410298434a716ed1f135 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/fe7e1eab397b410298434a716ed1f135 2024-12-11T02:27:20,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/fe7e1eab397b410298434a716ed1f135, entries=150, sequenceid=272, filesize=12.0 K 2024-12-11T02:27:20,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 66c347f1441760076f62fd1847fd01aa in 458ms, sequenceid=272, compaction requested=true 2024-12-11T02:27:20,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:20,468 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:20,468 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:20,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:20,469 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49400 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:20,469 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/A is initiating minor 
compaction (all files) 2024-12-11T02:27:20,469 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/A in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:20,469 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/87a87b71737240b2aa35b2b8f038ed82, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/22dc6e290eff46b6b9ae868d865a71c0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/775f36a64c0d4b50956e98496e5f46b2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e4f35ebb94ee48298f1e28121c774977] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=48.2 K 2024-12-11T02:27:20,470 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49400 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:20,470 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87a87b71737240b2aa35b2b8f038ed82, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733884037627 2024-12-11T02:27:20,470 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/B is initiating minor compaction (all files) 2024-12-11T02:27:20,470 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/B in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:20,470 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/994766761bfa4a1bbc572bd25e8eea11, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/53e2bedcf6154fedb3fc6cd1c8849e6c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8dcfb018db3d46bc97fae55ee15ace87, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/1182a9aa003946ed82998aee08db6638] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=48.2 K 2024-12-11T02:27:20,470 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22dc6e290eff46b6b9ae868d865a71c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733884039381 2024-12-11T02:27:20,470 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 994766761bfa4a1bbc572bd25e8eea11, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733884037627 2024-12-11T02:27:20,471 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 775f36a64c0d4b50956e98496e5f46b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1733884039390 2024-12-11T02:27:20,471 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 53e2bedcf6154fedb3fc6cd1c8849e6c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733884039381 2024-12-11T02:27:20,471 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4f35ebb94ee48298f1e28121c774977, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733884040006 2024-12-11T02:27:20,471 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dcfb018db3d46bc97fae55ee15ace87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1733884039390 2024-12-11T02:27:20,471 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 1182a9aa003946ed82998aee08db6638, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733884040006 2024-12-11T02:27:20,480 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#B#compaction#325 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:20,480 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#A#compaction#324 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:20,480 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/fb1184484fcf4c019e842006c647dc72 is 50, key is test_row_0/B:col10/1733884040006/Put/seqid=0 2024-12-11T02:27:20,481 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/ad66c0238b9e4b7c9e453db9495f4b70 is 50, key is test_row_0/A:col10/1733884040006/Put/seqid=0 2024-12-11T02:27:20,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742206_1382 (size=12983) 2024-12-11T02:27:20,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742207_1383 (size=12983) 2024-12-11T02:27:20,494 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/ad66c0238b9e4b7c9e453db9495f4b70 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/ad66c0238b9e4b7c9e453db9495f4b70 2024-12-11T02:27:20,499 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/A of 66c347f1441760076f62fd1847fd01aa into ad66c0238b9e4b7c9e453db9495f4b70(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:20,499 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:20,499 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/A, priority=12, startTime=1733884040467; duration=0sec 2024-12-11T02:27:20,499 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:20,500 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:20,500 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:20,502 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49400 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:20,502 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/C is initiating minor compaction (all files) 2024-12-11T02:27:20,502 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/C in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:20,502 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cbec665dc03545fb96bf2d200aefa643, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/88907d7fdb024ab29b8b9e099af16535, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a6ca37c4ca2d4805af24454dd196595e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/fe7e1eab397b410298434a716ed1f135] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=48.2 K 2024-12-11T02:27:20,502 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbec665dc03545fb96bf2d200aefa643, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733884037627 2024-12-11T02:27:20,503 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88907d7fdb024ab29b8b9e099af16535, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733884039381 2024-12-11T02:27:20,504 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6ca37c4ca2d4805af24454dd196595e, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1733884039390 2024-12-11T02:27:20,504 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe7e1eab397b410298434a716ed1f135, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733884040006 2024-12-11T02:27:20,513 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#C#compaction#326 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:20,514 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/b1c65efcf69b41b6ab58a18b3f5143cc is 50, key is test_row_0/C:col10/1733884040006/Put/seqid=0 2024-12-11T02:27:20,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742208_1384 (size=12983) 2024-12-11T02:27:20,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:20,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:27:20,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:20,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:20,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:20,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:20,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:20,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:20,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/029c5aff79a34764bb4c658109c43830 is 50, key is test_row_0/A:col10/1733884040640/Put/seqid=0 2024-12-11T02:27:20,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884100651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884100653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884100654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884100654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884100654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742209_1385 (size=12301) 2024-12-11T02:27:20,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/029c5aff79a34764bb4c658109c43830 2024-12-11T02:27:20,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/31a14184937b44ceaed7ce4ace36a11b is 50, key is test_row_0/B:col10/1733884040640/Put/seqid=0 2024-12-11T02:27:20,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742210_1386 (size=12301) 2024-12-11T02:27:20,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884100755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884100756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884100757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884100757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884100757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T02:27:20,780 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-11T02:27:20,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:20,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-11T02:27:20,784 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:20,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T02:27:20,784 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:20,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:20,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=79 2024-12-11T02:27:20,890 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/fb1184484fcf4c019e842006c647dc72 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb1184484fcf4c019e842006c647dc72 2024-12-11T02:27:20,895 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/B of 66c347f1441760076f62fd1847fd01aa into fb1184484fcf4c019e842006c647dc72(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:20,895 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:20,895 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/B, priority=12, startTime=1733884040468; duration=0sec 2024-12-11T02:27:20,895 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:20,895 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:20,936 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-11T02:27:20,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:20,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:20,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:20,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:20,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:20,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:20,955 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/b1c65efcf69b41b6ab58a18b3f5143cc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b1c65efcf69b41b6ab58a18b3f5143cc 2024-12-11T02:27:20,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884100958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884100958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,960 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/C of 66c347f1441760076f62fd1847fd01aa into b1c65efcf69b41b6ab58a18b3f5143cc(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:20,961 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:20,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,961 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/C, priority=12, startTime=1733884040468; duration=0sec 2024-12-11T02:27:20,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884100959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,961 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:20,961 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:20,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884100960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:20,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:20,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884100960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/31a14184937b44ceaed7ce4ace36a11b 2024-12-11T02:27:21,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T02:27:21,090 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/6b30864e278146a08a81b2ddcbc69273 is 50, key is test_row_0/C:col10/1733884040640/Put/seqid=0 2024-12-11T02:27:21,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-11T02:27:21,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:21,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:21,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:21,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:21,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:21,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:21,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742211_1387 (size=12301) 2024-12-11T02:27:21,247 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-11T02:27:21,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:21,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:21,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:21,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:21,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:21,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:21,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884101261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884101264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884101264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884101264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884101264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T02:27:21,400 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-11T02:27:21,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:21,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:21,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:21,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:21,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:21,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:21,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/6b30864e278146a08a81b2ddcbc69273 2024-12-11T02:27:21,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/029c5aff79a34764bb4c658109c43830 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/029c5aff79a34764bb4c658109c43830 2024-12-11T02:27:21,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/029c5aff79a34764bb4c658109c43830, entries=150, sequenceid=298, filesize=12.0 K 2024-12-11T02:27:21,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/31a14184937b44ceaed7ce4ace36a11b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/31a14184937b44ceaed7ce4ace36a11b 2024-12-11T02:27:21,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/31a14184937b44ceaed7ce4ace36a11b, entries=150, 
sequenceid=298, filesize=12.0 K 2024-12-11T02:27:21,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/6b30864e278146a08a81b2ddcbc69273 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b30864e278146a08a81b2ddcbc69273 2024-12-11T02:27:21,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b30864e278146a08a81b2ddcbc69273, entries=150, sequenceid=298, filesize=12.0 K 2024-12-11T02:27:21,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 66c347f1441760076f62fd1847fd01aa in 879ms, sequenceid=298, compaction requested=false 2024-12-11T02:27:21,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:21,553 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-11T02:27:21,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:21,554 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:27:21,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:21,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:21,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:21,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:21,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:21,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:21,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/37646dce72444671854a4f35623abd11 is 50, key is test_row_0/A:col10/1733884040647/Put/seqid=0 2024-12-11T02:27:21,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742212_1388 (size=12301) 2024-12-11T02:27:21,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:21,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:21,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884101782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884101783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884101785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884101785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884101786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T02:27:21,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884101887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884101887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884101889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884101890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:21,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884101890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:21,964 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/37646dce72444671854a4f35623abd11 2024-12-11T02:27:21,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/fb3f687ac4a947a29eac73953e119dae is 50, key is test_row_0/B:col10/1733884040647/Put/seqid=0 2024-12-11T02:27:21,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742213_1389 (size=12301) 2024-12-11T02:27:22,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884102090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884102091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884102093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884102093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884102094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,376 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/fb3f687ac4a947a29eac73953e119dae 2024-12-11T02:27:22,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/520c35dbab384782b699141a3a6ec8ba is 50, key is test_row_0/C:col10/1733884040647/Put/seqid=0 2024-12-11T02:27:22,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742214_1390 (size=12301) 2024-12-11T02:27:22,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884102393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884102395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884102395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884102396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884102398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,788 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/520c35dbab384782b699141a3a6ec8ba 2024-12-11T02:27:22,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/37646dce72444671854a4f35623abd11 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/37646dce72444671854a4f35623abd11 2024-12-11T02:27:22,798 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/37646dce72444671854a4f35623abd11, entries=150, sequenceid=311, filesize=12.0 K 2024-12-11T02:27:22,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/fb3f687ac4a947a29eac73953e119dae as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb3f687ac4a947a29eac73953e119dae 2024-12-11T02:27:22,802 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb3f687ac4a947a29eac73953e119dae, entries=150, sequenceid=311, filesize=12.0 K 2024-12-11T02:27:22,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/520c35dbab384782b699141a3a6ec8ba as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/520c35dbab384782b699141a3a6ec8ba 2024-12-11T02:27:22,806 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/520c35dbab384782b699141a3a6ec8ba, entries=150, sequenceid=311, filesize=12.0 K 2024-12-11T02:27:22,807 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 66c347f1441760076f62fd1847fd01aa in 1254ms, sequenceid=311, compaction requested=true 2024-12-11T02:27:22,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:22,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:22,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-11T02:27:22,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-11T02:27:22,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-11T02:27:22,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0290 sec 2024-12-11T02:27:22,816 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 2.0330 sec 2024-12-11T02:27:22,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T02:27:22,888 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-11T02:27:22,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:22,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-11T02:27:22,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T02:27:22,891 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:22,892 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:22,892 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:22,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:22,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-11T02:27:22,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:22,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:22,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:22,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-11T02:27:22,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:22,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:22,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/0ef5fd9d94fd425dbb0d3896f63446aa is 50, key is test_row_0/A:col10/1733884041784/Put/seqid=0 2024-12-11T02:27:22,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884102905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884102906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742215_1391 (size=14741) 2024-12-11T02:27:22,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884102906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884102907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/0ef5fd9d94fd425dbb0d3896f63446aa 2024-12-11T02:27:22,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:22,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884102909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:22,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/95bbcd346d7444939f63d2dacc7b4a00 is 50, key is test_row_0/B:col10/1733884041784/Put/seqid=0 2024-12-11T02:27:22,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742216_1392 (size=12301) 2024-12-11T02:27:22,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T02:27:23,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884103010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884103010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884103010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884103010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884103012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,044 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-11T02:27:23,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:23,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,045 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T02:27:23,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-11T02:27:23,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:23,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884103212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884103214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884103214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884103214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884103215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/95bbcd346d7444939f63d2dacc7b4a00 2024-12-11T02:27:23,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/6b10be3e092a47c1885aeff039df1adb is 50, key is test_row_0/C:col10/1733884041784/Put/seqid=0 2024-12-11T02:27:23,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742217_1393 (size=12301) 2024-12-11T02:27:23,350 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-11T02:27:23,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
as already flushing 2024-12-11T02:27:23,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T02:27:23,502 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-11T02:27:23,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:23,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884103515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884103516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884103518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884103519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:23,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884103519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,655 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-11T02:27:23,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:23,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:23,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/6b10be3e092a47c1885aeff039df1adb 2024-12-11T02:27:23,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/0ef5fd9d94fd425dbb0d3896f63446aa as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0ef5fd9d94fd425dbb0d3896f63446aa 2024-12-11T02:27:23,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0ef5fd9d94fd425dbb0d3896f63446aa, entries=200, sequenceid=338, filesize=14.4 K 2024-12-11T02:27:23,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/95bbcd346d7444939f63d2dacc7b4a00 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/95bbcd346d7444939f63d2dacc7b4a00 2024-12-11T02:27:23,746 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/95bbcd346d7444939f63d2dacc7b4a00, entries=150, 
sequenceid=338, filesize=12.0 K 2024-12-11T02:27:23,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/6b10be3e092a47c1885aeff039df1adb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b10be3e092a47c1885aeff039df1adb 2024-12-11T02:27:23,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b10be3e092a47c1885aeff039df1adb, entries=150, sequenceid=338, filesize=12.0 K 2024-12-11T02:27:23,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 66c347f1441760076f62fd1847fd01aa in 851ms, sequenceid=338, compaction requested=true 2024-12-11T02:27:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:23,751 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:23,751 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:23,754 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:23,754 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:23,754 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] 
regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/A is initiating minor compaction (all files) 2024-12-11T02:27:23,754 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/B is initiating minor compaction (all files) 2024-12-11T02:27:23,754 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/A in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,754 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/B in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,754 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/ad66c0238b9e4b7c9e453db9495f4b70, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/029c5aff79a34764bb4c658109c43830, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/37646dce72444671854a4f35623abd11, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0ef5fd9d94fd425dbb0d3896f63446aa] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=51.1 K 2024-12-11T02:27:23,754 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb1184484fcf4c019e842006c647dc72, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/31a14184937b44ceaed7ce4ace36a11b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb3f687ac4a947a29eac73953e119dae, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/95bbcd346d7444939f63d2dacc7b4a00] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=48.7 K 2024-12-11T02:27:23,755 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting fb1184484fcf4c019e842006c647dc72, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733884040006 2024-12-11T02:27:23,755 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad66c0238b9e4b7c9e453db9495f4b70, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733884040006 2024-12-11T02:27:23,755 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 029c5aff79a34764bb4c658109c43830, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733884040028 2024-12-11T02:27:23,755 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 31a14184937b44ceaed7ce4ace36a11b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733884040028 2024-12-11T02:27:23,755 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37646dce72444671854a4f35623abd11, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733884040647 2024-12-11T02:27:23,756 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting fb3f687ac4a947a29eac73953e119dae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733884040647 2024-12-11T02:27:23,756 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ef5fd9d94fd425dbb0d3896f63446aa, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733884041780 2024-12-11T02:27:23,756 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 95bbcd346d7444939f63d2dacc7b4a00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733884041784 2024-12-11T02:27:23,765 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#B#compaction#336 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:23,766 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e50acf8f1b9c41aca5f241af9cc6e3d2 is 50, key is test_row_0/B:col10/1733884041784/Put/seqid=0 2024-12-11T02:27:23,766 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#A#compaction#337 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:23,766 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/a82c95e60cfd4bf9825841817b458b60 is 50, key is test_row_0/A:col10/1733884041784/Put/seqid=0 2024-12-11T02:27:23,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742218_1394 (size=13119) 2024-12-11T02:27:23,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742219_1395 (size=13119) 2024-12-11T02:27:23,808 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:23,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-11T02:27:23,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:23,809 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-11T02:27:23,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:23,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:23,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:23,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:23,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:23,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:23,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/77a0268ade2c428bb2c745fd26b18d96 is 50, key is test_row_0/A:col10/1733884042905/Put/seqid=0 2024-12-11T02:27:23,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742220_1396 
(size=9857) 2024-12-11T02:27:23,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T02:27:24,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:24,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:24,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884104044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884104047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884104047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884104048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884104048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884104149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884104153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884104153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884104153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884104153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,179 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/e50acf8f1b9c41aca5f241af9cc6e3d2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e50acf8f1b9c41aca5f241af9cc6e3d2 2024-12-11T02:27:24,179 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/a82c95e60cfd4bf9825841817b458b60 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/a82c95e60cfd4bf9825841817b458b60 2024-12-11T02:27:24,184 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/A of 66c347f1441760076f62fd1847fd01aa into a82c95e60cfd4bf9825841817b458b60(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:24,184 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/B of 66c347f1441760076f62fd1847fd01aa into e50acf8f1b9c41aca5f241af9cc6e3d2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:24,184 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:24,184 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:24,184 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/A, priority=12, startTime=1733884043751; duration=0sec 2024-12-11T02:27:24,184 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/B, priority=12, startTime=1733884043751; duration=0sec 2024-12-11T02:27:24,184 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:24,184 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:24,184 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:24,184 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:24,184 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:24,186 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:24,186 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/C is initiating minor compaction (all files) 2024-12-11T02:27:24,186 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/C in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:24,186 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b1c65efcf69b41b6ab58a18b3f5143cc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b30864e278146a08a81b2ddcbc69273, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/520c35dbab384782b699141a3a6ec8ba, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b10be3e092a47c1885aeff039df1adb] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=48.7 K 2024-12-11T02:27:24,186 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1c65efcf69b41b6ab58a18b3f5143cc, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733884040006 2024-12-11T02:27:24,187 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b30864e278146a08a81b2ddcbc69273, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733884040028 2024-12-11T02:27:24,187 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 520c35dbab384782b699141a3a6ec8ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733884040647 2024-12-11T02:27:24,187 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b10be3e092a47c1885aeff039df1adb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733884041784 2024-12-11T02:27:24,195 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#C#compaction#339 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:24,196 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/2f7451c5aa5441fc93ce74442b195ce2 is 50, key is test_row_0/C:col10/1733884041784/Put/seqid=0 2024-12-11T02:27:24,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742221_1397 (size=13119) 2024-12-11T02:27:24,205 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/2f7451c5aa5441fc93ce74442b195ce2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2f7451c5aa5441fc93ce74442b195ce2 2024-12-11T02:27:24,210 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/C of 66c347f1441760076f62fd1847fd01aa into 2f7451c5aa5441fc93ce74442b195ce2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:24,210 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:24,210 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/C, priority=12, startTime=1733884043751; duration=0sec 2024-12-11T02:27:24,210 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:24,210 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:24,221 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/77a0268ade2c428bb2c745fd26b18d96 2024-12-11T02:27:24,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/f77750ef977e425992e29732b734d3e3 is 50, key is test_row_0/B:col10/1733884042905/Put/seqid=0 2024-12-11T02:27:24,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742222_1398 (size=9857) 2024-12-11T02:27:24,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884104353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884104356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884104357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884104357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884104358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,632 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/f77750ef977e425992e29732b734d3e3 2024-12-11T02:27:24,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/5a80b4f2fcd0469c9805959174ebcae0 is 50, key is test_row_0/C:col10/1733884042905/Put/seqid=0 2024-12-11T02:27:24,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742223_1399 (size=9857) 2024-12-11T02:27:24,648 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/5a80b4f2fcd0469c9805959174ebcae0 2024-12-11T02:27:24,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/77a0268ade2c428bb2c745fd26b18d96 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/77a0268ade2c428bb2c745fd26b18d96 2024-12-11T02:27:24,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,658 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/77a0268ade2c428bb2c745fd26b18d96, entries=100, sequenceid=347, filesize=9.6 K 2024-12-11T02:27:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884104656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/f77750ef977e425992e29732b734d3e3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/f77750ef977e425992e29732b734d3e3 2024-12-11T02:27:24,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884104660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,662 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/f77750ef977e425992e29732b734d3e3, entries=100, sequenceid=347, filesize=9.6 K 2024-12-11T02:27:24,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884104660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884104660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884104661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:24,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/5a80b4f2fcd0469c9805959174ebcae0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5a80b4f2fcd0469c9805959174ebcae0 2024-12-11T02:27:24,674 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5a80b4f2fcd0469c9805959174ebcae0, entries=100, sequenceid=347, filesize=9.6 K 2024-12-11T02:27:24,675 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=174.43 KB/178620 for 66c347f1441760076f62fd1847fd01aa in 866ms, sequenceid=347, compaction requested=false 2024-12-11T02:27:24,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:24,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:24,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-11T02:27:24,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-11T02:27:24,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-11T02:27:24,678 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7840 sec 2024-12-11T02:27:24,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.7890 sec 2024-12-11T02:27:24,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T02:27:24,995 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-11T02:27:24,996 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:24,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-11T02:27:24,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-11T02:27:24,998 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:24,998 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:24,999 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:25,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-11T02:27:25,150 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-11T02:27:25,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:25,151 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-11T02:27:25,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:25,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/361cc1aef3cd4ae199eebf96bc6c4027 is 50, key is test_row_0/A:col10/1733884044046/Put/seqid=0 2024-12-11T02:27:25,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:25,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:25,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742224_1400 (size=12301) 2024-12-11T02:27:25,164 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/361cc1aef3cd4ae199eebf96bc6c4027 2024-12-11T02:27:25,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884105166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884105166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884105168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884105168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884105169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/43becb01a656446d8cbc35942d1881d6 is 50, key is test_row_0/B:col10/1733884044046/Put/seqid=0 2024-12-11T02:27:25,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742225_1401 (size=12301) 2024-12-11T02:27:25,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884105269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884105272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884105272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-11T02:27:25,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884105472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884105473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884105475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,586 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/43becb01a656446d8cbc35942d1881d6 2024-12-11T02:27:25,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/7a644a335591417e8e9caf4a6135a518 is 50, key is test_row_0/C:col10/1733884044046/Put/seqid=0 2024-12-11T02:27:25,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742226_1402 (size=12301) 2024-12-11T02:27:25,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-11T02:27:25,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884105775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884105776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:25,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884105779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:25,999 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/7a644a335591417e8e9caf4a6135a518 2024-12-11T02:27:26,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/361cc1aef3cd4ae199eebf96bc6c4027 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/361cc1aef3cd4ae199eebf96bc6c4027 2024-12-11T02:27:26,007 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/361cc1aef3cd4ae199eebf96bc6c4027, entries=150, sequenceid=379, filesize=12.0 K 2024-12-11T02:27:26,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/43becb01a656446d8cbc35942d1881d6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/43becb01a656446d8cbc35942d1881d6 2024-12-11T02:27:26,011 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/43becb01a656446d8cbc35942d1881d6, entries=150, sequenceid=379, filesize=12.0 K 2024-12-11T02:27:26,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/7a644a335591417e8e9caf4a6135a518 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7a644a335591417e8e9caf4a6135a518 2024-12-11T02:27:26,017 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7a644a335591417e8e9caf4a6135a518, entries=150, sequenceid=379, filesize=12.0 K 2024-12-11T02:27:26,018 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 66c347f1441760076f62fd1847fd01aa in 867ms, sequenceid=379, compaction requested=true 2024-12-11T02:27:26,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:26,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:26,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-11T02:27:26,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-11T02:27:26,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-11T02:27:26,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0210 sec 2024-12-11T02:27:26,022 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.0260 sec 2024-12-11T02:27:26,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-11T02:27:26,101 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-11T02:27:26,102 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:26,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-11T02:27:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T02:27:26,104 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:26,105 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:26,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:26,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:26,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:27:26,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:26,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:26,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:26,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-11T02:27:26,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:26,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:26,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/303564fdc04447afa8f72a322268cf2c is 50, key is test_row_0/A:col10/1733884046178/Put/seqid=0 2024-12-11T02:27:26,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742227_1403 (size=14741) 2024-12-11T02:27:26,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T02:27:26,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884106220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884106220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T02:27:26,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:26,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:26,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:26,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:26,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:26,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884106282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884106282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884106282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884106324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884106324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T02:27:26,410 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T02:27:26,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:26,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:26,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:26,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:26,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:26,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:26,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884106528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884106529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,562 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T02:27:26,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:26,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:26,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:26,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:26,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:26,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:26,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/303564fdc04447afa8f72a322268cf2c 2024-12-11T02:27:26,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/873ec18ee36340518f1f602d93a1f510 is 50, key is test_row_0/B:col10/1733884046178/Put/seqid=0 2024-12-11T02:27:26,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742228_1404 (size=12301) 2024-12-11T02:27:26,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/873ec18ee36340518f1f602d93a1f510 2024-12-11T02:27:26,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/517853f115824b12aa13dfa97a3f1093 is 50, key is test_row_0/C:col10/1733884046178/Put/seqid=0 2024-12-11T02:27:26,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742229_1405 (size=12301) 2024-12-11T02:27:26,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/517853f115824b12aa13dfa97a3f1093 2024-12-11T02:27:26,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/303564fdc04447afa8f72a322268cf2c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/303564fdc04447afa8f72a322268cf2c 2024-12-11T02:27:26,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/303564fdc04447afa8f72a322268cf2c, entries=200, sequenceid=390, filesize=14.4 K 2024-12-11T02:27:26,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/873ec18ee36340518f1f602d93a1f510 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/873ec18ee36340518f1f602d93a1f510 2024-12-11T02:27:26,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/873ec18ee36340518f1f602d93a1f510, entries=150, sequenceid=390, filesize=12.0 K 2024-12-11T02:27:26,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/517853f115824b12aa13dfa97a3f1093 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/517853f115824b12aa13dfa97a3f1093 2024-12-11T02:27:26,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/517853f115824b12aa13dfa97a3f1093, entries=150, sequenceid=390, filesize=12.0 K 2024-12-11T02:27:26,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 66c347f1441760076f62fd1847fd01aa in 475ms, sequenceid=390, compaction requested=true 2024-12-11T02:27:26,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:26,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:26,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:26,656 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:26,656 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:26,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:26,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:26,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:26,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:26,657 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 50018 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:26,657 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/A is initiating minor compaction (all files) 2024-12-11T02:27:26,657 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47578 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:26,657 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/A in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:26,657 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/B is initiating minor compaction (all files) 2024-12-11T02:27:26,657 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/a82c95e60cfd4bf9825841817b458b60, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/77a0268ade2c428bb2c745fd26b18d96, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/361cc1aef3cd4ae199eebf96bc6c4027, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/303564fdc04447afa8f72a322268cf2c] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=48.8 K 2024-12-11T02:27:26,657 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/B in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:26,658 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e50acf8f1b9c41aca5f241af9cc6e3d2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/f77750ef977e425992e29732b734d3e3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/43becb01a656446d8cbc35942d1881d6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/873ec18ee36340518f1f602d93a1f510] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=46.5 K 2024-12-11T02:27:26,658 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting a82c95e60cfd4bf9825841817b458b60, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733884041784 2024-12-11T02:27:26,658 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e50acf8f1b9c41aca5f241af9cc6e3d2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733884041784 2024-12-11T02:27:26,658 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77a0268ade2c428bb2c745fd26b18d96, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733884042905 2024-12-11T02:27:26,658 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f77750ef977e425992e29732b734d3e3, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733884042905 2024-12-11T02:27:26,659 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 361cc1aef3cd4ae199eebf96bc6c4027, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733884044045 2024-12-11T02:27:26,659 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 43becb01a656446d8cbc35942d1881d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733884044045 2024-12-11T02:27:26,659 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 873ec18ee36340518f1f602d93a1f510, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1733884045167 2024-12-11T02:27:26,659 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 303564fdc04447afa8f72a322268cf2c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1733884045167 2024-12-11T02:27:26,682 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#A#compaction#349 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:26,683 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#B#compaction#348 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:26,683 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/b719d8a6ab7245fb94e9eb9b5cc446ee is 50, key is test_row_0/A:col10/1733884046178/Put/seqid=0 2024-12-11T02:27:26,684 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/65056c6c1bad4cdaacba2c0b1309d7b0 is 50, key is test_row_0/B:col10/1733884046178/Put/seqid=0 2024-12-11T02:27:26,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742230_1406 (size=13255) 2024-12-11T02:27:26,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742231_1407 (size=13255) 2024-12-11T02:27:26,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T02:27:26,708 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/b719d8a6ab7245fb94e9eb9b5cc446ee as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/b719d8a6ab7245fb94e9eb9b5cc446ee 2024-12-11T02:27:26,713 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/A of 66c347f1441760076f62fd1847fd01aa into b719d8a6ab7245fb94e9eb9b5cc446ee(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:26,713 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:26,713 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/A, priority=12, startTime=1733884046655; duration=0sec 2024-12-11T02:27:26,713 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:26,713 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:26,713 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:26,714 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47578 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:26,714 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/C is initiating minor compaction (all files) 2024-12-11T02:27:26,714 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/C in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:26,714 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2f7451c5aa5441fc93ce74442b195ce2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5a80b4f2fcd0469c9805959174ebcae0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7a644a335591417e8e9caf4a6135a518, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/517853f115824b12aa13dfa97a3f1093] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=46.5 K 2024-12-11T02:27:26,715 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f7451c5aa5441fc93ce74442b195ce2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1733884041784 2024-12-11T02:27:26,715 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a80b4f2fcd0469c9805959174ebcae0, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733884042905 2024-12-11T02:27:26,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,716 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T02:27:26,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:26,716 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a644a335591417e8e9caf4a6135a518, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733884044045 2024-12-11T02:27:26,716 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T02:27:26,717 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 517853f115824b12aa13dfa97a3f1093, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1733884045167 2024-12-11T02:27:26,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:26,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:26,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:26,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:26,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:26,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:26,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/dc7deb785dc24b6e81a10a17f4554473 is 50, key is test_row_0/A:col10/1733884046218/Put/seqid=0 2024-12-11T02:27:26,730 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#C#compaction#351 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:26,730 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/b676ca0e4a974bd492d26e8ec33fd4fb is 50, key is test_row_0/C:col10/1733884046178/Put/seqid=0 2024-12-11T02:27:26,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742232_1408 (size=12301) 2024-12-11T02:27:26,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742233_1409 (size=13255) 2024-12-11T02:27:26,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:26,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:26,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884106852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884106852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884106956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:26,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:26,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884106957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,111 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/65056c6c1bad4cdaacba2c0b1309d7b0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/65056c6c1bad4cdaacba2c0b1309d7b0 2024-12-11T02:27:27,117 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/B of 66c347f1441760076f62fd1847fd01aa into 65056c6c1bad4cdaacba2c0b1309d7b0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:27,117 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:27,117 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/B, priority=12, startTime=1733884046656; duration=0sec 2024-12-11T02:27:27,117 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:27,117 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:27,134 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/dc7deb785dc24b6e81a10a17f4554473 2024-12-11T02:27:27,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/4518d73a13aa4dc8805b97b77a1176b9 is 50, key is test_row_0/B:col10/1733884046218/Put/seqid=0 2024-12-11T02:27:27,147 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/b676ca0e4a974bd492d26e8ec33fd4fb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b676ca0e4a974bd492d26e8ec33fd4fb 2024-12-11T02:27:27,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742234_1410 (size=12301) 2024-12-11T02:27:27,150 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/4518d73a13aa4dc8805b97b77a1176b9 2024-12-11T02:27:27,153 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/C of 66c347f1441760076f62fd1847fd01aa into b676ca0e4a974bd492d26e8ec33fd4fb(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:27,153 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:27,153 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/C, priority=12, startTime=1733884046656; duration=0sec 2024-12-11T02:27:27,153 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:27,153 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:27,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884107158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/b384185d6a7046718b395ac045e35742 is 50, key is test_row_0/C:col10/1733884046218/Put/seqid=0 2024-12-11T02:27:27,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884107160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742235_1411 (size=12301) 2024-12-11T02:27:27,168 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/b384185d6a7046718b395ac045e35742 2024-12-11T02:27:27,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/dc7deb785dc24b6e81a10a17f4554473 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/dc7deb785dc24b6e81a10a17f4554473 2024-12-11T02:27:27,176 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/dc7deb785dc24b6e81a10a17f4554473, entries=150, sequenceid=417, filesize=12.0 K 2024-12-11T02:27:27,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/4518d73a13aa4dc8805b97b77a1176b9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4518d73a13aa4dc8805b97b77a1176b9 2024-12-11T02:27:27,180 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4518d73a13aa4dc8805b97b77a1176b9, entries=150, sequenceid=417, filesize=12.0 K 2024-12-11T02:27:27,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/b384185d6a7046718b395ac045e35742 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b384185d6a7046718b395ac045e35742 2024-12-11T02:27:27,186 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b384185d6a7046718b395ac045e35742, entries=150, sequenceid=417, filesize=12.0 K 2024-12-11T02:27:27,187 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 66c347f1441760076f62fd1847fd01aa in 470ms, sequenceid=417, compaction requested=false 2024-12-11T02:27:27,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:27,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:27,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-11T02:27:27,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-11T02:27:27,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-11T02:27:27,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0830 sec 2024-12-11T02:27:27,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.0880 sec 2024-12-11T02:27:27,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T02:27:27,208 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-11T02:27:27,209 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-11T02:27:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T02:27:27,211 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:27,212 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:27,212 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:27,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:27,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:27:27,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:27,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:27,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:27,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
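The flush activity above (pid=85 completing, pid=87 being queued) is driven by client flush requests such as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees". A minimal sketch of issuing the same request through the public Admin API follows; the configuration and connection setup here are illustrative assumptions, since the test obtains its connection from a mini-cluster harness rather than from hbase-site.xml.

// Sketch only: synchronously flush every region of a table.
// Assumes a reachable cluster and the HBase client libraries on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // reads hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master, which fans out
      // FlushRegionProcedure subprocedures to the region servers
      // (the pid=85/86 and pid=87/88 entries in this log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}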
2024-12-11T02:27:27,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:27,288 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:27,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/7b770536c2af47158a7b0e8de93c5ab9 is 50, key is test_row_0/A:col10/1733884046851/Put/seqid=0 2024-12-11T02:27:27,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742236_1412 (size=12301) 2024-12-11T02:27:27,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T02:27:27,364 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,365 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-11T02:27:27,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:27,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:27,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:27,365 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:27,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:27,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:27,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884107376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884107376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884107376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884107462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884107464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884107480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884107480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884107480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T02:27:27,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-11T02:27:27,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:27,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:27,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:27,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:27,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:27,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:27,670 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-11T02:27:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:27,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:27,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:27,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884107682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884107682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884107682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/7b770536c2af47158a7b0e8de93c5ab9 2024-12-11T02:27:27,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/ca60056275774872a063ac5c08b87d28 is 50, key is test_row_0/B:col10/1733884046851/Put/seqid=0 2024-12-11T02:27:27,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742237_1413 (size=12301) 2024-12-11T02:27:27,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/ca60056275774872a063ac5c08b87d28 2024-12-11T02:27:27,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/2dbb789327b44c388fa77dd614016a58 is 50, key is test_row_0/C:col10/1733884046851/Put/seqid=0 2024-12-11T02:27:27,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742238_1414 (size=12301) 2024-12-11T02:27:27,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=431 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/2dbb789327b44c388fa77dd614016a58 2024-12-11T02:27:27,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/7b770536c2af47158a7b0e8de93c5ab9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7b770536c2af47158a7b0e8de93c5ab9 2024-12-11T02:27:27,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7b770536c2af47158a7b0e8de93c5ab9, entries=150, sequenceid=431, filesize=12.0 K 2024-12-11T02:27:27,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/ca60056275774872a063ac5c08b87d28 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/ca60056275774872a063ac5c08b87d28 2024-12-11T02:27:27,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/ca60056275774872a063ac5c08b87d28, entries=150, sequenceid=431, filesize=12.0 K 2024-12-11T02:27:27,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/2dbb789327b44c388fa77dd614016a58 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2dbb789327b44c388fa77dd614016a58 2024-12-11T02:27:27,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2dbb789327b44c388fa77dd614016a58, entries=150, sequenceid=431, filesize=12.0 K 2024-12-11T02:27:27,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 66c347f1441760076f62fd1847fd01aa in 453ms, sequenceid=431, compaction requested=true 2024-12-11T02:27:27,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:27,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:27,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:27,740 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:27,740 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:27,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:27,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:27,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:27,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:27,742 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:27,742 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/A is initiating minor compaction (all files) 2024-12-11T02:27:27,742 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:27,742 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/A in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:27,742 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/B is initiating minor compaction (all files) 2024-12-11T02:27:27,742 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/b719d8a6ab7245fb94e9eb9b5cc446ee, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/dc7deb785dc24b6e81a10a17f4554473, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7b770536c2af47158a7b0e8de93c5ab9] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=37.0 K 2024-12-11T02:27:27,742 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/B in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
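As a cross-check on the sizes above: the ExploringCompactionPolicy selected all 3 eligible files totalling 37,857 bytes, i.e. 37857 / 1024 ≈ 37.0 K as reported, which matches the 12.9 K + 12.0 K + 12.0 K inputs listed below; because every file in the store is included, the run is logged as "minor compaction (all files)". The 13,357-byte blocks reported shortly afterwards are consistent with the compacted output files, suggesting the three flushes largely rewrote the same small set of rows.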
2024-12-11T02:27:27,742 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/65056c6c1bad4cdaacba2c0b1309d7b0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4518d73a13aa4dc8805b97b77a1176b9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/ca60056275774872a063ac5c08b87d28] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=37.0 K 2024-12-11T02:27:27,742 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting b719d8a6ab7245fb94e9eb9b5cc446ee, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1733884045167 2024-12-11T02:27:27,742 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 65056c6c1bad4cdaacba2c0b1309d7b0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1733884045167 2024-12-11T02:27:27,743 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc7deb785dc24b6e81a10a17f4554473, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1733884046214 2024-12-11T02:27:27,743 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 4518d73a13aa4dc8805b97b77a1176b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1733884046214 2024-12-11T02:27:27,743 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b770536c2af47158a7b0e8de93c5ab9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1733884046846 2024-12-11T02:27:27,743 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ca60056275774872a063ac5c08b87d28, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1733884046846 2024-12-11T02:27:27,751 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#A#compaction#357 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:27,752 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/3471020fe07e4a88a60ed1cf39238479 is 50, key is test_row_0/A:col10/1733884046851/Put/seqid=0 2024-12-11T02:27:27,754 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#B#compaction#358 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:27,755 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/f36a834e982e44a6a5bfb96040d95c6f is 50, key is test_row_0/B:col10/1733884046851/Put/seqid=0 2024-12-11T02:27:27,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742240_1416 (size=13357) 2024-12-11T02:27:27,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742239_1415 (size=13357) 2024-12-11T02:27:27,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T02:27:27,823 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-11T02:27:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:27,824 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:27:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:27,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/ea3d04f45ac54987bbb3a84fba920699 is 50, key is test_row_0/A:col10/1733884047374/Put/seqid=0 
2024-12-11T02:27:27,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742241_1417 (size=12301) 2024-12-11T02:27:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:27,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. as already flushing 2024-12-11T02:27:27,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884107980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884107982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884107984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884107984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:27,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:27,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884107986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:28,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884108083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:28,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884108085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,167 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/f36a834e982e44a6a5bfb96040d95c6f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/f36a834e982e44a6a5bfb96040d95c6f 2024-12-11T02:27:28,172 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/B of 66c347f1441760076f62fd1847fd01aa into f36a834e982e44a6a5bfb96040d95c6f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:28,172 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:28,172 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/B, priority=13, startTime=1733884047740; duration=0sec 2024-12-11T02:27:28,172 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:28,172 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:28,172 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:28,173 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:28,174 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 66c347f1441760076f62fd1847fd01aa/C is initiating minor compaction (all files) 2024-12-11T02:27:28,174 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 66c347f1441760076f62fd1847fd01aa/C in TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:28,174 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b676ca0e4a974bd492d26e8ec33fd4fb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b384185d6a7046718b395ac045e35742, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2dbb789327b44c388fa77dd614016a58] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp, totalSize=37.0 K 2024-12-11T02:27:28,174 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b676ca0e4a974bd492d26e8ec33fd4fb, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1733884045167 2024-12-11T02:27:28,174 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b384185d6a7046718b395ac045e35742, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=417, earliestPutTs=1733884046214 2024-12-11T02:27:28,175 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/3471020fe07e4a88a60ed1cf39238479 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/3471020fe07e4a88a60ed1cf39238479 2024-12-11T02:27:28,175 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dbb789327b44c388fa77dd614016a58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1733884046846 2024-12-11T02:27:28,188 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/A of 66c347f1441760076f62fd1847fd01aa into 3471020fe07e4a88a60ed1cf39238479(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:28,188 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:28,188 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/A, priority=13, startTime=1733884047740; duration=0sec 2024-12-11T02:27:28,188 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:28,188 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:28,191 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 66c347f1441760076f62fd1847fd01aa#C#compaction#360 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:28,192 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/8e729a9c82664bc5bb86c77dbb843747 is 50, key is test_row_0/C:col10/1733884046851/Put/seqid=0 2024-12-11T02:27:28,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742242_1418 (size=13357) 2024-12-11T02:27:28,248 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/ea3d04f45ac54987bbb3a84fba920699 2024-12-11T02:27:28,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/88fe4c64c1704090821382695524b805 is 50, key is test_row_0/B:col10/1733884047374/Put/seqid=0 2024-12-11T02:27:28,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742243_1419 (size=12301) 2024-12-11T02:27:28,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:28,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884108286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:28,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884108289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T02:27:28,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:28,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1733884108487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:28,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46264 deadline: 1733884108489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:28,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46294 deadline: 1733884108491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:28,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46316 deadline: 1733884108590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:28,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46266 deadline: 1733884108593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:28,617 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/8e729a9c82664bc5bb86c77dbb843747 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/8e729a9c82664bc5bb86c77dbb843747 2024-12-11T02:27:28,626 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 66c347f1441760076f62fd1847fd01aa/C of 66c347f1441760076f62fd1847fd01aa into 8e729a9c82664bc5bb86c77dbb843747(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:28,626 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:28,626 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa., storeName=66c347f1441760076f62fd1847fd01aa/C, priority=13, startTime=1733884047741; duration=0sec 2024-12-11T02:27:28,626 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:28,627 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:28,661 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/88fe4c64c1704090821382695524b805 2024-12-11T02:27:28,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/76802cd503134773b8785ed1a3200d50 is 50, key is test_row_0/C:col10/1733884047374/Put/seqid=0 2024-12-11T02:27:28,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742244_1420 (size=12301) 2024-12-11T02:27:29,020 DEBUG [Thread-1455 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x598cfed4 to 127.0.0.1:63149 2024-12-11T02:27:29,020 DEBUG [Thread-1455 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,020 DEBUG [Thread-1453 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1730a60f to 127.0.0.1:63149 2024-12-11T02:27:29,021 DEBUG [Thread-1453 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,021 DEBUG [Thread-1451 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e6bf6a to 127.0.0.1:63149 2024-12-11T02:27:29,021 DEBUG [Thread-1451 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,022 DEBUG [Thread-1447 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:63149 2024-12-11T02:27:29,022 DEBUG [Thread-1447 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,022 DEBUG [Thread-1449 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:63149 2024-12-11T02:27:29,022 DEBUG [Thread-1449 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,073 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/76802cd503134773b8785ed1a3200d50 2024-12-11T02:27:29,077 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/ea3d04f45ac54987bbb3a84fba920699 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/ea3d04f45ac54987bbb3a84fba920699 2024-12-11T02:27:29,080 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/ea3d04f45ac54987bbb3a84fba920699, entries=150, sequenceid=455, filesize=12.0 K 2024-12-11T02:27:29,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/88fe4c64c1704090821382695524b805 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/88fe4c64c1704090821382695524b805 2024-12-11T02:27:29,084 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/88fe4c64c1704090821382695524b805, entries=150, sequenceid=455, filesize=12.0 K 2024-12-11T02:27:29,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/76802cd503134773b8785ed1a3200d50 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/76802cd503134773b8785ed1a3200d50 2024-12-11T02:27:29,087 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/76802cd503134773b8785ed1a3200d50, entries=150, sequenceid=455, filesize=12.0 K 2024-12-11T02:27:29,088 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 66c347f1441760076f62fd1847fd01aa in 1264ms, sequenceid=455, compaction requested=false 2024-12-11T02:27:29,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:29,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:29,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-11T02:27:29,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-11T02:27:29,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-11T02:27:29,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8770 sec 2024-12-11T02:27:29,091 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.8820 sec 2024-12-11T02:27:29,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:29,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T02:27:29,097 DEBUG [Thread-1436 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d296fed to 127.0.0.1:63149 2024-12-11T02:27:29,097 DEBUG [Thread-1436 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:29,098 DEBUG [Thread-1440 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43f04e0e to 127.0.0.1:63149 2024-12-11T02:27:29,098 DEBUG [Thread-1440 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:29,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:29,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:29,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:29,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:29,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/bfef917769b541efa89fb09e56520959 is 50, key is test_row_0/A:col10/1733884049096/Put/seqid=0 2024-12-11T02:27:29,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742245_1421 (size=12301) 2024-12-11T02:27:29,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T02:27:29,315 INFO [Thread-1446 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 
2024-12-11T02:27:29,493 DEBUG [Thread-1444 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5886c0f2 to 127.0.0.1:63149 2024-12-11T02:27:29,493 DEBUG [Thread-1444 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,501 DEBUG [Thread-1442 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560ec309 to 127.0.0.1:63149 2024-12-11T02:27:29,501 DEBUG [Thread-1442 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,503 DEBUG [Thread-1438 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:63149 2024-12-11T02:27:29,503 DEBUG [Thread-1438 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6654 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6321 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6448 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6626 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6324 2024-12-11T02:27:29,504 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T02:27:29,504 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T02:27:29,504 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6862e3ce to 127.0.0.1:63149 2024-12-11T02:27:29,504 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:29,504 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T02:27:29,505 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T02:27:29,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:29,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T02:27:29,507 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/bfef917769b541efa89fb09e56520959 2024-12-11T02:27:29,508 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884049507"}]},"ts":"1733884049507"} 2024-12-11T02:27:29,508 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T02:27:29,511 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T02:27:29,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T02:27:29,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/b6f911340ce84370b80d5e5006acd69d is 50, key is test_row_0/B:col10/1733884049096/Put/seqid=0 2024-12-11T02:27:29,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=66c347f1441760076f62fd1847fd01aa, UNASSIGN}] 2024-12-11T02:27:29,515 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=66c347f1441760076f62fd1847fd01aa, UNASSIGN 2024-12-11T02:27:29,515 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=66c347f1441760076f62fd1847fd01aa, regionState=CLOSING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:29,516 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T02:27:29,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure 66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:27:29,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742246_1422 (size=12301) 2024-12-11T02:27:29,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T02:27:29,668 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:29,668 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:29,668 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T02:27:29,668 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing 66c347f1441760076f62fd1847fd01aa, disabling compactions & flushes 2024-12-11T02:27:29,668 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:29,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T02:27:29,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/b6f911340ce84370b80d5e5006acd69d 2024-12-11T02:27:29,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/0efddc1f4b07434aabad81f2dda75832 is 50, key is test_row_0/C:col10/1733884049096/Put/seqid=0 2024-12-11T02:27:29,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742247_1423 (size=12301) 2024-12-11T02:27:30,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T02:27:30,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/0efddc1f4b07434aabad81f2dda75832 2024-12-11T02:27:30,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/bfef917769b541efa89fb09e56520959 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/bfef917769b541efa89fb09e56520959 2024-12-11T02:27:30,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/bfef917769b541efa89fb09e56520959, entries=150, sequenceid=472, filesize=12.0 K 2024-12-11T02:27:30,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/b6f911340ce84370b80d5e5006acd69d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/b6f911340ce84370b80d5e5006acd69d 2024-12-11T02:27:30,338 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/b6f911340ce84370b80d5e5006acd69d, entries=150, sequenceid=472, filesize=12.0 K 2024-12-11T02:27:30,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/0efddc1f4b07434aabad81f2dda75832 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0efddc1f4b07434aabad81f2dda75832 2024-12-11T02:27:30,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0efddc1f4b07434aabad81f2dda75832, entries=150, sequenceid=472, filesize=12.0 K 2024-12-11T02:27:30,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=20.13 KB/20610 for 66c347f1441760076f62fd1847fd01aa in 1245ms, sequenceid=472, compaction requested=true 2024-12-11T02:27:30,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:30,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:30,342 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:30,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:30,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:30,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:30,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:30,342 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. because compaction request was cancelled 2024-12-11T02:27:30,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. after waiting 0 ms 2024-12-11T02:27:30,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 66c347f1441760076f62fd1847fd01aa:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:30,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:30,342 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
because compaction request was cancelled 2024-12-11T02:27:30,342 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:A 2024-12-11T02:27:30,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 2024-12-11T02:27:30,342 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:B 2024-12-11T02:27:30,342 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. because compaction request was cancelled 2024-12-11T02:27:30,342 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 66c347f1441760076f62fd1847fd01aa:C 2024-12-11T02:27:30,342 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(2837): Flushing 66c347f1441760076f62fd1847fd01aa 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-11T02:27:30,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=A 2024-12-11T02:27:30,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:30,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=B 2024-12-11T02:27:30,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:30,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 66c347f1441760076f62fd1847fd01aa, store=C 2024-12-11T02:27:30,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:30,346 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/b49b44f5fd784b6da655a8bca7b81740 is 50, key is test_row_0/A:col10/1733884049502/Put/seqid=0 2024-12-11T02:27:30,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742248_1424 (size=9857) 2024-12-11T02:27:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T02:27:30,750 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=6.71 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/b49b44f5fd784b6da655a8bca7b81740 2024-12-11T02:27:30,755 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/4693eed6f0554ff3bc2a6d73eda64fff is 50, key is test_row_0/B:col10/1733884049502/Put/seqid=0 2024-12-11T02:27:30,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742249_1425 (size=9857) 2024-12-11T02:27:31,159 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/4693eed6f0554ff3bc2a6d73eda64fff 2024-12-11T02:27:31,165 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/1f14d4a300584046af8f3a21546910c4 is 50, key is test_row_0/C:col10/1733884049502/Put/seqid=0 2024-12-11T02:27:31,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742250_1426 (size=9857) 2024-12-11T02:27:31,569 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/1f14d4a300584046af8f3a21546910c4 2024-12-11T02:27:31,572 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/A/b49b44f5fd784b6da655a8bca7b81740 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/b49b44f5fd784b6da655a8bca7b81740 2024-12-11T02:27:31,575 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/b49b44f5fd784b6da655a8bca7b81740, entries=100, sequenceid=478, filesize=9.6 K 2024-12-11T02:27:31,576 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/B/4693eed6f0554ff3bc2a6d73eda64fff as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4693eed6f0554ff3bc2a6d73eda64fff 2024-12-11T02:27:31,579 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4693eed6f0554ff3bc2a6d73eda64fff, entries=100, sequenceid=478, filesize=9.6 K 2024-12-11T02:27:31,579 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/.tmp/C/1f14d4a300584046af8f3a21546910c4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/1f14d4a300584046af8f3a21546910c4 2024-12-11T02:27:31,582 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/1f14d4a300584046af8f3a21546910c4, entries=100, sequenceid=478, filesize=9.6 K 2024-12-11T02:27:31,583 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 66c347f1441760076f62fd1847fd01aa in 1241ms, sequenceid=478, compaction requested=true 2024-12-11T02:27:31,583 DEBUG [StoreCloser-TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/6f8e5b70e3cc4c05999f7c0ed9bd9fa3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7d99f9a48904442d885d1a21fd26e0fb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e886d2029cc748b5850b3e4342d6299a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/305f3345834d468c864d935f0c309bf7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/1b9c467ab11a4bd1ba2632ff8da93195, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d6f3f08634b04b1a8771324c2164230c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d09c0ca0c952403b823682c5af30e313, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d30642754fe4449fbe8896a60e4c6fa1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/cc32ce7fc0a74492ae93b1fa746a6074, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/39788151991346e5b7f89be03312cf5e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/c50d986f104341198cb7a6ed8d9e6e92, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0428800973af4e86a22e422362206e25, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d5324b391ed0411b815277f06e0f1847, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7ac737be3c1b4f32a6a56055c04ee574, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/9b067195454349f2a33a0b958531b056, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/87a87b71737240b2aa35b2b8f038ed82, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/791a82f07c27485c99ee86cc9d731954, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/22dc6e290eff46b6b9ae868d865a71c0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/775f36a64c0d4b50956e98496e5f46b2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/ad66c0238b9e4b7c9e453db9495f4b70, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e4f35ebb94ee48298f1e28121c774977, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/029c5aff79a34764bb4c658109c43830, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/37646dce72444671854a4f35623abd11, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0ef5fd9d94fd425dbb0d3896f63446aa, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/a82c95e60cfd4bf9825841817b458b60, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/77a0268ade2c428bb2c745fd26b18d96, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/361cc1aef3cd4ae199eebf96bc6c4027, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/303564fdc04447afa8f72a322268cf2c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/b719d8a6ab7245fb94e9eb9b5cc446ee, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/dc7deb785dc24b6e81a10a17f4554473, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7b770536c2af47158a7b0e8de93c5ab9] to archive 2024-12-11T02:27:31,584 DEBUG [StoreCloser-TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T02:27:31,586 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/6f8e5b70e3cc4c05999f7c0ed9bd9fa3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/6f8e5b70e3cc4c05999f7c0ed9bd9fa3 2024-12-11T02:27:31,586 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d6f3f08634b04b1a8771324c2164230c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d6f3f08634b04b1a8771324c2164230c 2024-12-11T02:27:31,586 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d09c0ca0c952403b823682c5af30e313 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d09c0ca0c952403b823682c5af30e313 2024-12-11T02:27:31,587 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/305f3345834d468c864d935f0c309bf7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/305f3345834d468c864d935f0c309bf7 2024-12-11T02:27:31,587 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/1b9c467ab11a4bd1ba2632ff8da93195 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/1b9c467ab11a4bd1ba2632ff8da93195 
2024-12-11T02:27:31,587 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7d99f9a48904442d885d1a21fd26e0fb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7d99f9a48904442d885d1a21fd26e0fb 2024-12-11T02:27:31,587 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e886d2029cc748b5850b3e4342d6299a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e886d2029cc748b5850b3e4342d6299a 2024-12-11T02:27:31,587 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d30642754fe4449fbe8896a60e4c6fa1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d30642754fe4449fbe8896a60e4c6fa1 2024-12-11T02:27:31,588 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/cc32ce7fc0a74492ae93b1fa746a6074 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/cc32ce7fc0a74492ae93b1fa746a6074 2024-12-11T02:27:31,588 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0428800973af4e86a22e422362206e25 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0428800973af4e86a22e422362206e25 2024-12-11T02:27:31,588 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/39788151991346e5b7f89be03312cf5e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/39788151991346e5b7f89be03312cf5e 2024-12-11T02:27:31,589 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d5324b391ed0411b815277f06e0f1847 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/d5324b391ed0411b815277f06e0f1847 2024-12-11T02:27:31,589 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/9b067195454349f2a33a0b958531b056 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/9b067195454349f2a33a0b958531b056 2024-12-11T02:27:31,589 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/c50d986f104341198cb7a6ed8d9e6e92 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/c50d986f104341198cb7a6ed8d9e6e92 2024-12-11T02:27:31,589 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7ac737be3c1b4f32a6a56055c04ee574 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7ac737be3c1b4f32a6a56055c04ee574 2024-12-11T02:27:31,589 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/87a87b71737240b2aa35b2b8f038ed82 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/87a87b71737240b2aa35b2b8f038ed82 2024-12-11T02:27:31,590 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/791a82f07c27485c99ee86cc9d731954 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/791a82f07c27485c99ee86cc9d731954 2024-12-11T02:27:31,590 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/22dc6e290eff46b6b9ae868d865a71c0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/22dc6e290eff46b6b9ae868d865a71c0 2024-12-11T02:27:31,591 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/775f36a64c0d4b50956e98496e5f46b2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/775f36a64c0d4b50956e98496e5f46b2 2024-12-11T02:27:31,591 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e4f35ebb94ee48298f1e28121c774977 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/e4f35ebb94ee48298f1e28121c774977 2024-12-11T02:27:31,591 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/ad66c0238b9e4b7c9e453db9495f4b70 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/ad66c0238b9e4b7c9e453db9495f4b70 2024-12-11T02:27:31,591 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/37646dce72444671854a4f35623abd11 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/37646dce72444671854a4f35623abd11 2024-12-11T02:27:31,592 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0ef5fd9d94fd425dbb0d3896f63446aa to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/0ef5fd9d94fd425dbb0d3896f63446aa 2024-12-11T02:27:31,592 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/029c5aff79a34764bb4c658109c43830 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/029c5aff79a34764bb4c658109c43830 2024-12-11T02:27:31,592 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/77a0268ade2c428bb2c745fd26b18d96 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/77a0268ade2c428bb2c745fd26b18d96 2024-12-11T02:27:31,593 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/a82c95e60cfd4bf9825841817b458b60 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/a82c95e60cfd4bf9825841817b458b60 2024-12-11T02:27:31,593 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/361cc1aef3cd4ae199eebf96bc6c4027 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/361cc1aef3cd4ae199eebf96bc6c4027 2024-12-11T02:27:31,593 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/303564fdc04447afa8f72a322268cf2c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/303564fdc04447afa8f72a322268cf2c 2024-12-11T02:27:31,593 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/b719d8a6ab7245fb94e9eb9b5cc446ee to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/b719d8a6ab7245fb94e9eb9b5cc446ee 2024-12-11T02:27:31,593 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7b770536c2af47158a7b0e8de93c5ab9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/7b770536c2af47158a7b0e8de93c5ab9 2024-12-11T02:27:31,593 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/dc7deb785dc24b6e81a10a17f4554473 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/dc7deb785dc24b6e81a10a17f4554473 2024-12-11T02:27:31,595 DEBUG [StoreCloser-TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e79c9b36940743f2878af881205a15c2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e96a4342a34544699afc39aedb1ff61b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/86b8d561a0ee410d9f6295ce16e32bc4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c2b9dad8bf2544f08715a0d7e428ab0f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/5c571ccaba0141c787b4497e19e94d21, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/30c0a4b0f08d4e82a565860a3452a83a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e09ee8a501b14e2f8b03f9e82a039d1d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/2006b3c464f44fc2be4a6f5573e3a1a3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/d61ab90db53249cdb24806d910afc35b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/b4a75cc90b7d40249af55c6647921db6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c68be07b42724ed8b1390f1487eb39cb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/3dda7ab457094d30b633413d56b55dd8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/24d519961f394f6ab7a12f5b44fbe394, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/48db5db58b654132a1bc0b5ff54c6ae2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8c72b2cc502b4b98ab653404c4e41d92, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/994766761bfa4a1bbc572bd25e8eea11, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/701b92f833284c64a273d0ecbd4e6503, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/53e2bedcf6154fedb3fc6cd1c8849e6c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8dcfb018db3d46bc97fae55ee15ace87, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb1184484fcf4c019e842006c647dc72, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/1182a9aa003946ed82998aee08db6638, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/31a14184937b44ceaed7ce4ace36a11b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb3f687ac4a947a29eac73953e119dae, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e50acf8f1b9c41aca5f241af9cc6e3d2, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/95bbcd346d7444939f63d2dacc7b4a00, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/f77750ef977e425992e29732b734d3e3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/43becb01a656446d8cbc35942d1881d6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/65056c6c1bad4cdaacba2c0b1309d7b0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/873ec18ee36340518f1f602d93a1f510, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4518d73a13aa4dc8805b97b77a1176b9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/ca60056275774872a063ac5c08b87d28] to archive 2024-12-11T02:27:31,596 DEBUG [StoreCloser-TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T02:27:31,597 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e96a4342a34544699afc39aedb1ff61b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e96a4342a34544699afc39aedb1ff61b 2024-12-11T02:27:31,598 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c2b9dad8bf2544f08715a0d7e428ab0f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c2b9dad8bf2544f08715a0d7e428ab0f 2024-12-11T02:27:31,598 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/5c571ccaba0141c787b4497e19e94d21 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/5c571ccaba0141c787b4497e19e94d21 2024-12-11T02:27:31,598 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e79c9b36940743f2878af881205a15c2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e79c9b36940743f2878af881205a15c2 2024-12-11T02:27:31,598 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/86b8d561a0ee410d9f6295ce16e32bc4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/86b8d561a0ee410d9f6295ce16e32bc4 2024-12-11T02:27:31,599 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/2006b3c464f44fc2be4a6f5573e3a1a3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/2006b3c464f44fc2be4a6f5573e3a1a3 2024-12-11T02:27:31,599 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/30c0a4b0f08d4e82a565860a3452a83a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/30c0a4b0f08d4e82a565860a3452a83a 2024-12-11T02:27:31,599 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e09ee8a501b14e2f8b03f9e82a039d1d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e09ee8a501b14e2f8b03f9e82a039d1d 2024-12-11T02:27:31,600 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/d61ab90db53249cdb24806d910afc35b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/d61ab90db53249cdb24806d910afc35b 2024-12-11T02:27:31,600 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/b4a75cc90b7d40249af55c6647921db6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/b4a75cc90b7d40249af55c6647921db6 2024-12-11T02:27:31,600 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/3dda7ab457094d30b633413d56b55dd8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/3dda7ab457094d30b633413d56b55dd8 2024-12-11T02:27:31,600 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/24d519961f394f6ab7a12f5b44fbe394 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/24d519961f394f6ab7a12f5b44fbe394 2024-12-11T02:27:31,600 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c68be07b42724ed8b1390f1487eb39cb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/c68be07b42724ed8b1390f1487eb39cb 2024-12-11T02:27:31,601 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/48db5db58b654132a1bc0b5ff54c6ae2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/48db5db58b654132a1bc0b5ff54c6ae2 2024-12-11T02:27:31,601 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8c72b2cc502b4b98ab653404c4e41d92 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8c72b2cc502b4b98ab653404c4e41d92 2024-12-11T02:27:31,601 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/994766761bfa4a1bbc572bd25e8eea11 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/994766761bfa4a1bbc572bd25e8eea11 2024-12-11T02:27:31,602 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/701b92f833284c64a273d0ecbd4e6503 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/701b92f833284c64a273d0ecbd4e6503 2024-12-11T02:27:31,602 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/53e2bedcf6154fedb3fc6cd1c8849e6c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/53e2bedcf6154fedb3fc6cd1c8849e6c 2024-12-11T02:27:31,602 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/1182a9aa003946ed82998aee08db6638 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/1182a9aa003946ed82998aee08db6638 2024-12-11T02:27:31,602 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8dcfb018db3d46bc97fae55ee15ace87 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/8dcfb018db3d46bc97fae55ee15ace87 2024-12-11T02:27:31,603 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb1184484fcf4c019e842006c647dc72 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb1184484fcf4c019e842006c647dc72 2024-12-11T02:27:31,603 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/31a14184937b44ceaed7ce4ace36a11b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/31a14184937b44ceaed7ce4ace36a11b 2024-12-11T02:27:31,603 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb3f687ac4a947a29eac73953e119dae to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/fb3f687ac4a947a29eac73953e119dae 2024-12-11T02:27:31,604 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e50acf8f1b9c41aca5f241af9cc6e3d2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/e50acf8f1b9c41aca5f241af9cc6e3d2 2024-12-11T02:27:31,604 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/95bbcd346d7444939f63d2dacc7b4a00 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/95bbcd346d7444939f63d2dacc7b4a00 2024-12-11T02:27:31,604 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/f77750ef977e425992e29732b734d3e3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/f77750ef977e425992e29732b734d3e3 2024-12-11T02:27:31,604 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/43becb01a656446d8cbc35942d1881d6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/43becb01a656446d8cbc35942d1881d6 2024-12-11T02:27:31,605 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/65056c6c1bad4cdaacba2c0b1309d7b0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/65056c6c1bad4cdaacba2c0b1309d7b0 2024-12-11T02:27:31,605 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/873ec18ee36340518f1f602d93a1f510 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/873ec18ee36340518f1f602d93a1f510 2024-12-11T02:27:31,605 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4518d73a13aa4dc8805b97b77a1176b9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4518d73a13aa4dc8805b97b77a1176b9 2024-12-11T02:27:31,605 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/ca60056275774872a063ac5c08b87d28 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/ca60056275774872a063ac5c08b87d28 2024-12-11T02:27:31,606 DEBUG [StoreCloser-TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/365a52b164ff4ddd8ee1585df763c161, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d94bc57e10a7419aa2d8f10e8256fe65, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0ed825a08f714fd086815657dda286cb, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a39ccfeaeb0341c0ae65e00ecfc96e3e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/9d8967cb07154bd4b5430344a0e8ca51, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5ad57efcf4df4a18bc1248f53982ebd0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0d204ed63cee40c8bb9562ff9472f14b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/c15a0eae1fa54cd78f481c48e5d82bd7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/22fe1e1888574b8f986ce662608a1f77, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/663cfda180a4427da7d232336d030690, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/df3abf823be54bcfb5217d2684e10768, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7736b344b72a4b8fba1d182db73e29f0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d9cb4d56e0e14a8bb0c375df497a42d9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/8acf02d41c07464f8f352543f9afa2db, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cd0445619e9846289efffbd135f643d0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cbec665dc03545fb96bf2d200aefa643, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/386c09bd96f64f41a5d19f779f683f34, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/88907d7fdb024ab29b8b9e099af16535, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a6ca37c4ca2d4805af24454dd196595e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b1c65efcf69b41b6ab58a18b3f5143cc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/fe7e1eab397b410298434a716ed1f135, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b30864e278146a08a81b2ddcbc69273, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/520c35dbab384782b699141a3a6ec8ba, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2f7451c5aa5441fc93ce74442b195ce2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b10be3e092a47c1885aeff039df1adb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5a80b4f2fcd0469c9805959174ebcae0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7a644a335591417e8e9caf4a6135a518, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b676ca0e4a974bd492d26e8ec33fd4fb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/517853f115824b12aa13dfa97a3f1093, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b384185d6a7046718b395ac045e35742, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2dbb789327b44c388fa77dd614016a58] to archive 2024-12-11T02:27:31,607 DEBUG [StoreCloser-TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-11T02:27:31,609 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d94bc57e10a7419aa2d8f10e8256fe65 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d94bc57e10a7419aa2d8f10e8256fe65 2024-12-11T02:27:31,610 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0ed825a08f714fd086815657dda286cb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0ed825a08f714fd086815657dda286cb 2024-12-11T02:27:31,610 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/365a52b164ff4ddd8ee1585df763c161 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/365a52b164ff4ddd8ee1585df763c161 2024-12-11T02:27:31,610 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/c15a0eae1fa54cd78f481c48e5d82bd7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/c15a0eae1fa54cd78f481c48e5d82bd7 2024-12-11T02:27:31,610 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a39ccfeaeb0341c0ae65e00ecfc96e3e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a39ccfeaeb0341c0ae65e00ecfc96e3e 2024-12-11T02:27:31,610 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5ad57efcf4df4a18bc1248f53982ebd0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5ad57efcf4df4a18bc1248f53982ebd0 2024-12-11T02:27:31,610 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0d204ed63cee40c8bb9562ff9472f14b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0d204ed63cee40c8bb9562ff9472f14b 2024-12-11T02:27:31,610 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/9d8967cb07154bd4b5430344a0e8ca51 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/9d8967cb07154bd4b5430344a0e8ca51 2024-12-11T02:27:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T02:27:31,611 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/22fe1e1888574b8f986ce662608a1f77 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/22fe1e1888574b8f986ce662608a1f77 2024-12-11T02:27:31,612 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/df3abf823be54bcfb5217d2684e10768 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/df3abf823be54bcfb5217d2684e10768 2024-12-11T02:27:31,612 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7736b344b72a4b8fba1d182db73e29f0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7736b344b72a4b8fba1d182db73e29f0 2024-12-11T02:27:31,612 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d9cb4d56e0e14a8bb0c375df497a42d9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/d9cb4d56e0e14a8bb0c375df497a42d9 2024-12-11T02:27:31,612 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cd0445619e9846289efffbd135f643d0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cd0445619e9846289efffbd135f643d0 2024-12-11T02:27:31,612 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/663cfda180a4427da7d232336d030690 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/663cfda180a4427da7d232336d030690 2024-12-11T02:27:31,613 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): 
Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/8acf02d41c07464f8f352543f9afa2db to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/8acf02d41c07464f8f352543f9afa2db 2024-12-11T02:27:31,613 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cbec665dc03545fb96bf2d200aefa643 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/cbec665dc03545fb96bf2d200aefa643 2024-12-11T02:27:31,614 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/386c09bd96f64f41a5d19f779f683f34 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/386c09bd96f64f41a5d19f779f683f34 2024-12-11T02:27:31,614 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a6ca37c4ca2d4805af24454dd196595e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/a6ca37c4ca2d4805af24454dd196595e 2024-12-11T02:27:31,614 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/88907d7fdb024ab29b8b9e099af16535 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/88907d7fdb024ab29b8b9e099af16535 2024-12-11T02:27:31,615 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b1c65efcf69b41b6ab58a18b3f5143cc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b1c65efcf69b41b6ab58a18b3f5143cc 2024-12-11T02:27:31,615 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b30864e278146a08a81b2ddcbc69273 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b30864e278146a08a81b2ddcbc69273 2024-12-11T02:27:31,615 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/fe7e1eab397b410298434a716ed1f135 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/fe7e1eab397b410298434a716ed1f135 2024-12-11T02:27:31,615 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/520c35dbab384782b699141a3a6ec8ba to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/520c35dbab384782b699141a3a6ec8ba 2024-12-11T02:27:31,616 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2f7451c5aa5441fc93ce74442b195ce2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2f7451c5aa5441fc93ce74442b195ce2 2024-12-11T02:27:31,617 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b10be3e092a47c1885aeff039df1adb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/6b10be3e092a47c1885aeff039df1adb 2024-12-11T02:27:31,617 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5a80b4f2fcd0469c9805959174ebcae0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/5a80b4f2fcd0469c9805959174ebcae0 2024-12-11T02:27:31,617 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7a644a335591417e8e9caf4a6135a518 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/7a644a335591417e8e9caf4a6135a518 2024-12-11T02:27:31,617 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b676ca0e4a974bd492d26e8ec33fd4fb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b676ca0e4a974bd492d26e8ec33fd4fb 2024-12-11T02:27:31,617 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/517853f115824b12aa13dfa97a3f1093 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/517853f115824b12aa13dfa97a3f1093 2024-12-11T02:27:31,617 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b384185d6a7046718b395ac045e35742 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/b384185d6a7046718b395ac045e35742 2024-12-11T02:27:31,617 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2dbb789327b44c388fa77dd614016a58 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/2dbb789327b44c388fa77dd614016a58 2024-12-11T02:27:31,621 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/recovered.edits/481.seqid, newMaxSeqId=481, maxSeqId=1 2024-12-11T02:27:31,622 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa. 
2024-12-11T02:27:31,622 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for 66c347f1441760076f62fd1847fd01aa: 2024-12-11T02:27:31,623 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed 66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:31,623 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=66c347f1441760076f62fd1847fd01aa, regionState=CLOSED 2024-12-11T02:27:31,625 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-11T02:27:31,625 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure 66c347f1441760076f62fd1847fd01aa, server=5f57a24c5131,40311,1733883964600 in 2.1080 sec 2024-12-11T02:27:31,627 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-12-11T02:27:31,627 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=66c347f1441760076f62fd1847fd01aa, UNASSIGN in 2.1110 sec 2024-12-11T02:27:31,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-11T02:27:31,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.1160 sec 2024-12-11T02:27:31,629 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884051629"}]},"ts":"1733884051629"} 2024-12-11T02:27:31,630 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T02:27:31,632 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T02:27:31,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.1270 sec 2024-12-11T02:27:32,932 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T02:27:33,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T02:27:33,612 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-11T02:27:33,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T02:27:33,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:33,614 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:33,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-11T02:27:33,615 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:33,616 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:33,617 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/recovered.edits] 2024-12-11T02:27:33,622 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/b49b44f5fd784b6da655a8bca7b81740 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/b49b44f5fd784b6da655a8bca7b81740 2024-12-11T02:27:33,622 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/3471020fe07e4a88a60ed1cf39238479 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/3471020fe07e4a88a60ed1cf39238479 2024-12-11T02:27:33,622 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/bfef917769b541efa89fb09e56520959 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/bfef917769b541efa89fb09e56520959 2024-12-11T02:27:33,622 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/ea3d04f45ac54987bbb3a84fba920699 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/A/ea3d04f45ac54987bbb3a84fba920699 2024-12-11T02:27:33,626 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4693eed6f0554ff3bc2a6d73eda64fff to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/4693eed6f0554ff3bc2a6d73eda64fff 2024-12-11T02:27:33,626 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/b6f911340ce84370b80d5e5006acd69d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/b6f911340ce84370b80d5e5006acd69d 2024-12-11T02:27:33,626 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/f36a834e982e44a6a5bfb96040d95c6f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/f36a834e982e44a6a5bfb96040d95c6f 2024-12-11T02:27:33,626 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/88fe4c64c1704090821382695524b805 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/B/88fe4c64c1704090821382695524b805 2024-12-11T02:27:33,629 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/1f14d4a300584046af8f3a21546910c4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/1f14d4a300584046af8f3a21546910c4 2024-12-11T02:27:33,629 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/76802cd503134773b8785ed1a3200d50 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/76802cd503134773b8785ed1a3200d50 2024-12-11T02:27:33,629 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0efddc1f4b07434aabad81f2dda75832 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/0efddc1f4b07434aabad81f2dda75832 2024-12-11T02:27:33,629 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/8e729a9c82664bc5bb86c77dbb843747 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/C/8e729a9c82664bc5bb86c77dbb843747 2024-12-11T02:27:33,632 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/recovered.edits/481.seqid to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa/recovered.edits/481.seqid 2024-12-11T02:27:33,632 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/66c347f1441760076f62fd1847fd01aa 2024-12-11T02:27:33,632 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T02:27:33,634 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:33,638 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T02:27:33,640 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T02:27:33,640 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:33,640 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-11T02:27:33,641 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733884053640"}]},"ts":"9223372036854775807"} 2024-12-11T02:27:33,642 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T02:27:33,642 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 66c347f1441760076f62fd1847fd01aa, NAME => 'TestAcidGuarantees,,1733884026862.66c347f1441760076f62fd1847fd01aa.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T02:27:33,642 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-11T02:27:33,642 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733884053642"}]},"ts":"9223372036854775807"} 2024-12-11T02:27:33,644 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T02:27:33,646 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:33,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 33 msec 2024-12-11T02:27:33,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-11T02:27:33,716 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-11T02:27:33,725 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=245 (was 248), OpenFileDescriptor=453 (was 465), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=386 (was 384) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4314 (was 4349) 2024-12-11T02:27:33,734 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=245, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=386, ProcessCount=11, AvailableMemoryMB=4314 2024-12-11T02:27:33,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-11T02:27:33,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:27:33,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:33,737 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T02:27:33,737 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:33,737 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 94 2024-12-11T02:27:33,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-11T02:27:33,738 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T02:27:33,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742251_1427 (size=963) 2024-12-11T02:27:33,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-11T02:27:34,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-11T02:27:34,145 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 2024-12-11T02:27:34,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742252_1428 (size=53) 2024-12-11T02:27:34,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-11T02:27:34,351 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-11T02:27:34,550 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:27:34,550 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a10f50dcc5dc0cf76420942b9469ad44, disabling compactions & flushes 2024-12-11T02:27:34,550 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:34,550 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:34,550 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. after waiting 0 ms 2024-12-11T02:27:34,550 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:34,551 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:34,551 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:34,552 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T02:27:34,552 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733884054552"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733884054552"}]},"ts":"1733884054552"} 2024-12-11T02:27:34,553 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T02:27:34,553 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T02:27:34,554 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884054553"}]},"ts":"1733884054553"} 2024-12-11T02:27:34,554 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T02:27:34,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, ASSIGN}] 2024-12-11T02:27:34,559 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, ASSIGN 2024-12-11T02:27:34,560 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, ASSIGN; state=OFFLINE, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=false 2024-12-11T02:27:34,710 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=a10f50dcc5dc0cf76420942b9469ad44, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:34,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; OpenRegionProcedure a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:27:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-11T02:27:34,863 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:34,866 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:34,866 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7285): Opening region: {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:27:34,866 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:34,866 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:27:34,866 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7327): checking encryption for a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:34,866 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7330): checking classloading for a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:34,867 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:34,868 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:27:34,869 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a10f50dcc5dc0cf76420942b9469ad44 columnFamilyName A 2024-12-11T02:27:34,869 DEBUG [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:34,869 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(327): Store=a10f50dcc5dc0cf76420942b9469ad44/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:27:34,869 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:34,870 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:27:34,870 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a10f50dcc5dc0cf76420942b9469ad44 columnFamilyName B 2024-12-11T02:27:34,870 DEBUG [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:34,870 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(327): Store=a10f50dcc5dc0cf76420942b9469ad44/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:27:34,871 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:34,871 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:27:34,871 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a10f50dcc5dc0cf76420942b9469ad44 columnFamilyName C 2024-12-11T02:27:34,871 DEBUG [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:34,872 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(327): Store=a10f50dcc5dc0cf76420942b9469ad44/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:27:34,872 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:34,872 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:34,873 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:34,874 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:27:34,875 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1085): writing seq id for a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:34,876 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T02:27:34,876 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1102): Opened a10f50dcc5dc0cf76420942b9469ad44; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68652176, jitterRate=0.022997140884399414}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:27:34,877 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1001): Region open journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:34,878 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., pid=96, masterSystemTime=1733884054862 2024-12-11T02:27:34,879 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:34,879 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:34,879 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=a10f50dcc5dc0cf76420942b9469ad44, regionState=OPEN, openSeqNum=2, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:34,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-11T02:27:34,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; OpenRegionProcedure a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 in 169 msec 2024-12-11T02:27:34,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-11T02:27:34,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, ASSIGN in 323 msec 2024-12-11T02:27:34,883 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T02:27:34,883 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884054883"}]},"ts":"1733884054883"} 2024-12-11T02:27:34,884 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T02:27:34,886 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T02:27:34,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1510 sec 2024-12-11T02:27:35,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-11T02:27:35,842 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-12-11T02:27:35,843 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75b14fbd to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b6cf8cb 2024-12-11T02:27:35,847 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f422b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:35,848 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:35,849 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53934, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:35,850 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T02:27:35,851 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47004, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T02:27:35,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-11T02:27:35,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:27:35,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-11T02:27:35,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742253_1429 (size=999) 2024-12-11T02:27:36,263 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-11T02:27:36,263 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-11T02:27:36,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T02:27:36,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, REOPEN/MOVE}] 2024-12-11T02:27:36,268 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, REOPEN/MOVE 2024-12-11T02:27:36,268 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=a10f50dcc5dc0cf76420942b9469ad44, regionState=CLOSING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,269 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T02:27:36,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; CloseRegionProcedure a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:27:36,421 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,421 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(124): Close a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,421 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T02:27:36,421 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1681): Closing a10f50dcc5dc0cf76420942b9469ad44, disabling compactions & flushes 2024-12-11T02:27:36,421 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:36,421 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:36,421 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. after waiting 0 ms 2024-12-11T02:27:36,421 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:36,425 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-11T02:27:36,425 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:36,425 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1635): Region close journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:36,425 WARN [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionServer(3786): Not adding moved region record: a10f50dcc5dc0cf76420942b9469ad44 to self. 2024-12-11T02:27:36,427 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(170): Closed a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,427 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=a10f50dcc5dc0cf76420942b9469ad44, regionState=CLOSED 2024-12-11T02:27:36,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-11T02:27:36,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseRegionProcedure a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 in 159 msec 2024-12-11T02:27:36,429 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, REOPEN/MOVE; state=CLOSED, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=true 2024-12-11T02:27:36,580 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=a10f50dcc5dc0cf76420942b9469ad44, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=99, state=RUNNABLE; OpenRegionProcedure a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:27:36,732 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,735 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:36,735 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7285): Opening region: {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:27:36,736 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,736 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:27:36,736 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7327): checking encryption for a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,736 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7330): checking classloading for a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,737 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,738 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:27:36,738 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a10f50dcc5dc0cf76420942b9469ad44 columnFamilyName A 2024-12-11T02:27:36,739 DEBUG [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:36,740 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(327): Store=a10f50dcc5dc0cf76420942b9469ad44/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:27:36,740 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,740 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:27:36,740 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a10f50dcc5dc0cf76420942b9469ad44 columnFamilyName B 2024-12-11T02:27:36,741 DEBUG [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:36,741 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(327): Store=a10f50dcc5dc0cf76420942b9469ad44/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:27:36,741 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,741 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:27:36,742 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a10f50dcc5dc0cf76420942b9469ad44 columnFamilyName C 2024-12-11T02:27:36,742 DEBUG [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:36,742 INFO [StoreOpener-a10f50dcc5dc0cf76420942b9469ad44-1 {}] regionserver.HStore(327): Store=a10f50dcc5dc0cf76420942b9469ad44/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:27:36,742 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:36,743 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,744 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,745 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:27:36,746 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1085): writing seq id for a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,746 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1102): Opened a10f50dcc5dc0cf76420942b9469ad44; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59767480, jitterRate=-0.10939514636993408}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:27:36,747 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1001): Region open journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:36,747 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., pid=101, masterSystemTime=1733884056732 2024-12-11T02:27:36,749 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:36,749 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:36,749 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=a10f50dcc5dc0cf76420942b9469ad44, regionState=OPEN, openSeqNum=5, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-12-11T02:27:36,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; OpenRegionProcedure a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 in 169 msec 2024-12-11T02:27:36,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-11T02:27:36,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, REOPEN/MOVE in 485 msec 2024-12-11T02:27:36,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-11T02:27:36,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 488 msec 2024-12-11T02:27:36,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 902 msec 2024-12-11T02:27:36,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-11T02:27:36,758 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62f74604 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ec15031 2024-12-11T02:27:36,772 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df33cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,773 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49e13594 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3dd5b441 2024-12-11T02:27:36,776 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f472e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,777 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c54a0d3 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c336ea4 2024-12-11T02:27:36,780 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167a78b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,780 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3875c8c5 to 
127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f94d721 2024-12-11T02:27:36,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aee939b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,784 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0801ba40 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@319559be 2024-12-11T02:27:36,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f49665c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,787 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c907e21 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683f8469 2024-12-11T02:27:36,790 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6584e9ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,790 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61ec0f48 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75e4d3d0 2024-12-11T02:27:36,794 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ec8e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,794 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7819b9e2 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b308f62 2024-12-11T02:27:36,797 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787e5169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,798 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47679076 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68035c67 2024-12-11T02:27:36,802 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@627cad17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,803 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x4cb9e50e to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3eab689a 2024-12-11T02:27:36,806 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39387e4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:27:36,811 DEBUG [hconnection-0x4d417bb7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,812 DEBUG [hconnection-0x45f602af-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,813 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53950, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:36,813 DEBUG [hconnection-0x5a26ccd1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,813 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53960, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,813 DEBUG [hconnection-0x6e0fbe11-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,813 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-11T02:27:36,813 DEBUG [hconnection-0x6eea1313-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,814 DEBUG [hconnection-0x2725bcaa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,814 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,815 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:36,815 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,815 DEBUG [hconnection-0x711f0a08-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,815 INFO [PEWorker-5 {}] 
procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:36,815 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:36,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-11T02:27:36,816 DEBUG [hconnection-0x6f730b52-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,816 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,816 DEBUG [hconnection-0x4d6c63fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,817 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,817 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54020, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,818 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,818 DEBUG [hconnection-0x79313c36-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:27:36,820 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:27:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:36,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:27:36,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:36,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:36,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:36,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:36,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:36,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:36,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884116845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884116845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884116849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884116850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884116849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211862a58a6b6e541efb8e527a80e3dddf9_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884056823/Put/seqid=0 2024-12-11T02:27:36,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742254_1430 (size=12154) 2024-12-11T02:27:36,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-11T02:27:36,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884116950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884116951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884116951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884116951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:36,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884116956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,969 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:36,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:36,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:36,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:36,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:36,969 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:36,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-11T02:27:37,122 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:37,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:37,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,153 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T02:27:37,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884117152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884117153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884117153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884117155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884117158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,259 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:37,263 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211862a58a6b6e541efb8e527a80e3dddf9_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211862a58a6b6e541efb8e527a80e3dddf9_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:37,263 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/661d28466f2a45c8a722b529d39bf8c3, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:37,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/661d28466f2a45c8a722b529d39bf8c3 is 175, key is test_row_0/A:col10/1733884056823/Put/seqid=0 2024-12-11T02:27:37,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742255_1431 (size=30955) 2024-12-11T02:27:37,275 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:37,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:37,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:37,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-11T02:27:37,428 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:37,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:37,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:37,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,429 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:37,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884117455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884117456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884117457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884117460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884117464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,581 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:37,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:37,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,673 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/661d28466f2a45c8a722b529d39bf8c3 2024-12-11T02:27:37,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/87be517c45d34a44a0ce7d4d162136dd is 50, key is test_row_0/B:col10/1733884056823/Put/seqid=0 2024-12-11T02:27:37,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742256_1432 (size=12001) 2024-12-11T02:27:37,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:37,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
as already flushing 2024-12-11T02:27:37,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,886 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:37,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:37,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:37,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-11T02:27:37,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884117961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884117962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884117964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884117966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:37,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884117968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,039 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:38,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:38,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:38,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:38,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/87be517c45d34a44a0ce7d4d162136dd 2024-12-11T02:27:38,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/9deea7af99ff456285eed88a682e7676 is 50, key is test_row_0/C:col10/1733884056823/Put/seqid=0 2024-12-11T02:27:38,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742257_1433 (size=12001) 2024-12-11T02:27:38,192 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:38,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:38,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
as already flushing 2024-12-11T02:27:38,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:38,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,344 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:38,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:38,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:38,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:38,345 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,497 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:38,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:38,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:38,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:38,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:38,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/9deea7af99ff456285eed88a682e7676 2024-12-11T02:27:38,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/661d28466f2a45c8a722b529d39bf8c3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/661d28466f2a45c8a722b529d39bf8c3 2024-12-11T02:27:38,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/661d28466f2a45c8a722b529d39bf8c3, entries=150, sequenceid=16, filesize=30.2 K 2024-12-11T02:27:38,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/87be517c45d34a44a0ce7d4d162136dd as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/87be517c45d34a44a0ce7d4d162136dd 2024-12-11T02:27:38,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/87be517c45d34a44a0ce7d4d162136dd, entries=150, sequenceid=16, 
filesize=11.7 K 2024-12-11T02:27:38,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/9deea7af99ff456285eed88a682e7676 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9deea7af99ff456285eed88a682e7676 2024-12-11T02:27:38,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9deea7af99ff456285eed88a682e7676, entries=150, sequenceid=16, filesize=11.7 K 2024-12-11T02:27:38,578 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a10f50dcc5dc0cf76420942b9469ad44 in 1754ms, sequenceid=16, compaction requested=false 2024-12-11T02:27:38,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:38,650 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-11T02:27:38,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:38,651 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:27:38,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:38,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:38,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:38,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:38,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:38,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:38,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c7b87b3ecb3e415380dbe77ba356e789_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884056847/Put/seqid=0 2024-12-11T02:27:38,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742258_1434 (size=12154) 2024-12-11T02:27:38,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-11T02:27:38,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:38,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884118980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884118982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884118983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884118986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:38,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884118987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:39,079 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c7b87b3ecb3e415380dbe77ba356e789_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c7b87b3ecb3e415380dbe77ba356e789_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:39,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/af8b907e05d04016a7d8507c16b51a94, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:39,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/af8b907e05d04016a7d8507c16b51a94 is 175, key is test_row_0/A:col10/1733884056847/Put/seqid=0 2024-12-11T02:27:39,086 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742259_1435 (size=30955) 2024-12-11T02:27:39,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884119088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884119088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884119090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884119091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884119091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884119291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884119291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884119294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884119295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884119295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,486 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/af8b907e05d04016a7d8507c16b51a94 2024-12-11T02:27:39,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/9d0462a832af478889b2468b15d28648 is 50, key is test_row_0/B:col10/1733884056847/Put/seqid=0 2024-12-11T02:27:39,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742260_1436 (size=12001) 2024-12-11T02:27:39,498 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/9d0462a832af478889b2468b15d28648 2024-12-11T02:27:39,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/d45af2a8f8a2489b8802e36228a9645c is 50, key is test_row_0/C:col10/1733884056847/Put/seqid=0 2024-12-11T02:27:39,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742261_1437 (size=12001) 2024-12-11T02:27:39,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884119595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884119596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884119600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884119601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:39,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884119602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:39,914 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/d45af2a8f8a2489b8802e36228a9645c 2024-12-11T02:27:39,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/af8b907e05d04016a7d8507c16b51a94 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/af8b907e05d04016a7d8507c16b51a94 2024-12-11T02:27:39,924 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/af8b907e05d04016a7d8507c16b51a94, entries=150, sequenceid=40, filesize=30.2 K 2024-12-11T02:27:39,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/9d0462a832af478889b2468b15d28648 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/9d0462a832af478889b2468b15d28648 2024-12-11T02:27:39,932 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/9d0462a832af478889b2468b15d28648, entries=150, sequenceid=40, filesize=11.7 K 2024-12-11T02:27:39,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/d45af2a8f8a2489b8802e36228a9645c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d45af2a8f8a2489b8802e36228a9645c 2024-12-11T02:27:39,936 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d45af2a8f8a2489b8802e36228a9645c, entries=150, sequenceid=40, filesize=11.7 K 2024-12-11T02:27:39,937 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for a10f50dcc5dc0cf76420942b9469ad44 in 1286ms, sequenceid=40, compaction requested=false 2024-12-11T02:27:39,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:39,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:39,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-11T02:27:39,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-12-11T02:27:39,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-11T02:27:39,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1240 sec 2024-12-11T02:27:39,942 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 3.1280 sec 2024-12-11T02:27:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:40,106 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:27:40,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:40,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:40,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:40,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:40,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:40,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:40,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c1b8162c0b0f4fe2ab4e5ad17fb09285_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884060106/Put/seqid=0 2024-12-11T02:27:40,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742262_1438 (size=17034) 2024-12-11T02:27:40,124 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:40,128 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c1b8162c0b0f4fe2ab4e5ad17fb09285_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c1b8162c0b0f4fe2ab4e5ad17fb09285_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:40,133 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/958ea41b992b489d95920fecd64a594f, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:40,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/958ea41b992b489d95920fecd64a594f is 175, key is test_row_0/A:col10/1733884060106/Put/seqid=0 2024-12-11T02:27:40,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884120134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884120135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884120137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884120138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884120139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742263_1439 (size=48139) 2024-12-11T02:27:40,146 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/958ea41b992b489d95920fecd64a594f 2024-12-11T02:27:40,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/fa8ea021398e4523941f41bbc07e5655 is 50, key is test_row_0/B:col10/1733884060106/Put/seqid=0 2024-12-11T02:27:40,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742264_1440 (size=12001) 2024-12-11T02:27:40,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884120239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884120239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884120240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884120244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884120245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884120445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884120446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884120446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884120447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884120448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/fa8ea021398e4523941f41bbc07e5655 2024-12-11T02:27:40,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/bdcef57b1e8f4eefaa2e6dd0a0d33510 is 50, key is test_row_0/C:col10/1733884060106/Put/seqid=0 2024-12-11T02:27:40,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742265_1441 (size=12001) 2024-12-11T02:27:40,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/bdcef57b1e8f4eefaa2e6dd0a0d33510 2024-12-11T02:27:40,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/958ea41b992b489d95920fecd64a594f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/958ea41b992b489d95920fecd64a594f 2024-12-11T02:27:40,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/958ea41b992b489d95920fecd64a594f, entries=250, sequenceid=55, filesize=47.0 K 2024-12-11T02:27:40,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/fa8ea021398e4523941f41bbc07e5655 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fa8ea021398e4523941f41bbc07e5655 2024-12-11T02:27:40,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fa8ea021398e4523941f41bbc07e5655, entries=150, sequenceid=55, filesize=11.7 K 2024-12-11T02:27:40,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/bdcef57b1e8f4eefaa2e6dd0a0d33510 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/bdcef57b1e8f4eefaa2e6dd0a0d33510 2024-12-11T02:27:40,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/bdcef57b1e8f4eefaa2e6dd0a0d33510, entries=150, sequenceid=55, filesize=11.7 K 2024-12-11T02:27:40,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for a10f50dcc5dc0cf76420942b9469ad44 in 492ms, sequenceid=55, compaction requested=true 2024-12-11T02:27:40,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:40,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:40,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:40,598 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:40,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:40,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:40,598 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:40,598 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:40,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:40,599 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:40,599 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:40,599 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/A is initiating minor compaction (all files) 2024-12-11T02:27:40,599 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/B is initiating minor compaction (all files) 2024-12-11T02:27:40,599 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/B in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:40,599 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/87be517c45d34a44a0ce7d4d162136dd, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/9d0462a832af478889b2468b15d28648, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fa8ea021398e4523941f41bbc07e5655] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=35.2 K 2024-12-11T02:27:40,600 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/A in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:40,600 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/661d28466f2a45c8a722b529d39bf8c3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/af8b907e05d04016a7d8507c16b51a94, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/958ea41b992b489d95920fecd64a594f] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=107.5 K 2024-12-11T02:27:40,600 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:40,600 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/661d28466f2a45c8a722b529d39bf8c3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/af8b907e05d04016a7d8507c16b51a94, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/958ea41b992b489d95920fecd64a594f] 2024-12-11T02:27:40,600 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 87be517c45d34a44a0ce7d4d162136dd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733884056820 2024-12-11T02:27:40,601 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 661d28466f2a45c8a722b529d39bf8c3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733884056820 2024-12-11T02:27:40,601 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d0462a832af478889b2468b15d28648, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733884056844 2024-12-11T02:27:40,601 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting af8b907e05d04016a7d8507c16b51a94, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733884056844 2024-12-11T02:27:40,601 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting fa8ea021398e4523941f41bbc07e5655, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733884058978 2024-12-11T02:27:40,601 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 958ea41b992b489d95920fecd64a594f, keycount=250, bloomtype=ROW, size=47.0 K, 
encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733884058978 2024-12-11T02:27:40,608 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#B#compaction#378 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:40,609 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/36cb6c73d06f4f67a0574704ffcf5734 is 50, key is test_row_0/B:col10/1733884060106/Put/seqid=0 2024-12-11T02:27:40,610 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:40,612 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211bc687674fbca4f3ab765512751b1739c_a10f50dcc5dc0cf76420942b9469ad44 store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:40,615 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211bc687674fbca4f3ab765512751b1739c_a10f50dcc5dc0cf76420942b9469ad44, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:40,615 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bc687674fbca4f3ab765512751b1739c_a10f50dcc5dc0cf76420942b9469ad44 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:40,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742266_1442 (size=12104) 2024-12-11T02:27:40,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742267_1443 (size=4469) 2024-12-11T02:27:40,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:40,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:27:40,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:40,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:40,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:40,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:40,760 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:40,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:40,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121173735a99b2a84b6bb459efd3b1ba7948_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884060133/Put/seqid=0 2024-12-11T02:27:40,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742268_1444 (size=12154) 2024-12-11T02:27:40,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884120796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884120803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884120807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884120806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884120804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884120908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-11T02:27:40,921 INFO [Thread-1907 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-12-11T02:27:40,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:40,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-11T02:27:40,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884120917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884120918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,923 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:40,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-11T02:27:40,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884120918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:40,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884120918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:40,924 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:40,924 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:41,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-11T02:27:41,025 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/36cb6c73d06f4f67a0574704ffcf5734 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36cb6c73d06f4f67a0574704ffcf5734 2024-12-11T02:27:41,030 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#A#compaction#379 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:41,030 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/03fc790764fd4bc088914847e2d42686 is 175, key is test_row_0/A:col10/1733884060106/Put/seqid=0 2024-12-11T02:27:41,031 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/B of a10f50dcc5dc0cf76420942b9469ad44 into 36cb6c73d06f4f67a0574704ffcf5734(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:41,031 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:41,031 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/B, priority=13, startTime=1733884060598; duration=0sec 2024-12-11T02:27:41,032 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:41,032 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:B 2024-12-11T02:27:41,032 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:41,034 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:41,034 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/C is initiating minor compaction (all files) 2024-12-11T02:27:41,034 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/C in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,034 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9deea7af99ff456285eed88a682e7676, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d45af2a8f8a2489b8802e36228a9645c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/bdcef57b1e8f4eefaa2e6dd0a0d33510] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=35.2 K 2024-12-11T02:27:41,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742269_1445 (size=31058) 2024-12-11T02:27:41,035 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 9deea7af99ff456285eed88a682e7676, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733884056820 2024-12-11T02:27:41,036 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d45af2a8f8a2489b8802e36228a9645c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733884056844 2024-12-11T02:27:41,036 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting bdcef57b1e8f4eefaa2e6dd0a0d33510, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=55, earliestPutTs=1733884058978 2024-12-11T02:27:41,041 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/03fc790764fd4bc088914847e2d42686 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/03fc790764fd4bc088914847e2d42686 2024-12-11T02:27:41,046 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#C#compaction#381 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:41,046 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/d7eb109c4386455d86256473f4a1e815 is 50, key is test_row_0/C:col10/1733884060106/Put/seqid=0 2024-12-11T02:27:41,049 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/A of a10f50dcc5dc0cf76420942b9469ad44 into 03fc790764fd4bc088914847e2d42686(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:41,049 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:41,049 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/A, priority=13, startTime=1733884060598; duration=0sec 2024-12-11T02:27:41,049 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:41,050 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:A 2024-12-11T02:27:41,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742270_1446 (size=12104) 2024-12-11T02:27:41,076 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-11T02:27:41,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:41,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:41,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:41,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884121112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884121124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884121125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884121125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884121126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,182 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:41,186 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121173735a99b2a84b6bb459efd3b1ba7948_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121173735a99b2a84b6bb459efd3b1ba7948_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:41,187 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/c87b8bd9653f41d68a7ae797ed614b30, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:41,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/c87b8bd9653f41d68a7ae797ed614b30 is 175, key is test_row_0/A:col10/1733884060133/Put/seqid=0 2024-12-11T02:27:41,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742271_1447 (size=30955) 2024-12-11T02:27:41,192 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=47.0 
K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/c87b8bd9653f41d68a7ae797ed614b30 2024-12-11T02:27:41,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/6a9e69339a7649d2894964e7b14d60b8 is 50, key is test_row_0/B:col10/1733884060133/Put/seqid=0 2024-12-11T02:27:41,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742272_1448 (size=12001) 2024-12-11T02:27:41,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-11T02:27:41,228 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-11T02:27:41,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:41,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:41,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,381 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-11T02:27:41,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:41,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884121418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884121430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884121430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884121431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884121432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,457 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/d7eb109c4386455d86256473f4a1e815 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d7eb109c4386455d86256473f4a1e815 2024-12-11T02:27:41,462 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/C of a10f50dcc5dc0cf76420942b9469ad44 into d7eb109c4386455d86256473f4a1e815(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:41,462 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:41,462 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/C, priority=13, startTime=1733884060598; duration=0sec 2024-12-11T02:27:41,462 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:41,462 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:C 2024-12-11T02:27:41,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-11T02:27:41,534 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-11T02:27:41,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:41,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:41,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/6a9e69339a7649d2894964e7b14d60b8 2024-12-11T02:27:41,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/8a59f0f89f49448795b0b4288af602c8 is 50, key is test_row_0/C:col10/1733884060133/Put/seqid=0 2024-12-11T02:27:41,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742273_1449 (size=12001) 2024-12-11T02:27:41,687 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-11T02:27:41,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:41,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:41,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-11T02:27:41,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:41,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884121925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884121937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884121938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884121940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:41,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884121940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,993 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:41,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-11T02:27:41,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:41,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:41,994 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:42,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/8a59f0f89f49448795b0b4288af602c8 2024-12-11T02:27:42,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/c87b8bd9653f41d68a7ae797ed614b30 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/c87b8bd9653f41d68a7ae797ed614b30 2024-12-11T02:27:42,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/c87b8bd9653f41d68a7ae797ed614b30, entries=150, sequenceid=79, filesize=30.2 K 2024-12-11T02:27:42,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/6a9e69339a7649d2894964e7b14d60b8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/6a9e69339a7649d2894964e7b14d60b8 2024-12-11T02:27:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-11T02:27:42,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/6a9e69339a7649d2894964e7b14d60b8, entries=150, sequenceid=79, filesize=11.7 K 2024-12-11T02:27:42,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/8a59f0f89f49448795b0b4288af602c8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/8a59f0f89f49448795b0b4288af602c8 2024-12-11T02:27:42,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/8a59f0f89f49448795b0b4288af602c8, entries=150, sequenceid=79, filesize=11.7 K 2024-12-11T02:27:42,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a10f50dcc5dc0cf76420942b9469ad44 in 1277ms, sequenceid=79, compaction requested=false 2024-12-11T02:27:42,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:42,146 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:42,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-11T02:27:42,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:42,147 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:27:42,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:42,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:42,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:42,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:42,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:42,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:42,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118ac64c455b9040c7bcbd23cef268b6f7_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884060802/Put/seqid=0 2024-12-11T02:27:42,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742274_1450 (size=12154) 2024-12-11T02:27:42,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:42,179 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118ac64c455b9040c7bcbd23cef268b6f7_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118ac64c455b9040c7bcbd23cef268b6f7_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:42,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/1b2b449c29d841c582267ac47fda0f5d, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:42,181 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/1b2b449c29d841c582267ac47fda0f5d is 175, key is test_row_0/A:col10/1733884060802/Put/seqid=0 2024-12-11T02:27:42,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742275_1451 (size=30955) 2024-12-11T02:27:42,238 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T02:27:42,586 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/1b2b449c29d841c582267ac47fda0f5d 2024-12-11T02:27:42,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/b6b13be5078a4db784ed404fb0916bfc is 50, key is test_row_0/B:col10/1733884060802/Put/seqid=0 2024-12-11T02:27:42,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742276_1452 (size=12001) 2024-12-11T02:27:42,597 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/b6b13be5078a4db784ed404fb0916bfc 2024-12-11T02:27:42,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/89f65450664f43a7acf3ef0217fcb1e7 is 50, key is test_row_0/C:col10/1733884060802/Put/seqid=0 2024-12-11T02:27:42,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742277_1453 (size=12001) 2024-12-11T02:27:42,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:42,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:42,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:42,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884122969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:42,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:42,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884122969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:42,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:42,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884122975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:42,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:42,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884122976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:42,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884122978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,027 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/89f65450664f43a7acf3ef0217fcb1e7 2024-12-11T02:27:43,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-11T02:27:43,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/1b2b449c29d841c582267ac47fda0f5d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/1b2b449c29d841c582267ac47fda0f5d 2024-12-11T02:27:43,036 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/1b2b449c29d841c582267ac47fda0f5d, entries=150, sequenceid=94, filesize=30.2 K 2024-12-11T02:27:43,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-11T02:27:43,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/b6b13be5078a4db784ed404fb0916bfc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/b6b13be5078a4db784ed404fb0916bfc 2024-12-11T02:27:43,040 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/b6b13be5078a4db784ed404fb0916bfc, entries=150, sequenceid=94, filesize=11.7 K 2024-12-11T02:27:43,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/89f65450664f43a7acf3ef0217fcb1e7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/89f65450664f43a7acf3ef0217fcb1e7 2024-12-11T02:27:43,044 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/89f65450664f43a7acf3ef0217fcb1e7, entries=150, sequenceid=94, filesize=11.7 K 2024-12-11T02:27:43,045 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a10f50dcc5dc0cf76420942b9469ad44 in 898ms, sequenceid=94, compaction requested=true 2024-12-11T02:27:43,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:43,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:43,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-11T02:27:43,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-11T02:27:43,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-11T02:27:43,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1220 sec 2024-12-11T02:27:43,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.1260 sec 2024-12-11T02:27:43,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:43,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:27:43,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:43,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:43,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:43,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:43,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:43,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:43,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117fe879c4574c4af9ae9313efd7b6f442_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884062976/Put/seqid=0 2024-12-11T02:27:43,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884123092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884123093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884123095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884123096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884123096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742278_1454 (size=14594) 2024-12-11T02:27:43,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884123198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884123198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884123202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884123202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884123202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884123402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884123403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884123408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884123409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884123410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,502 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:43,506 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117fe879c4574c4af9ae9313efd7b6f442_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117fe879c4574c4af9ae9313efd7b6f442_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:43,507 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/d0a83fa560d44ab8a6a7c6e3243ea1c9, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:43,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/d0a83fa560d44ab8a6a7c6e3243ea1c9 is 175, key is test_row_0/A:col10/1733884062976/Put/seqid=0 2024-12-11T02:27:43,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742279_1455 (size=39549) 2024-12-11T02:27:43,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884123708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884123708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884123712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884123714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:43,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884123715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:43,912 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/d0a83fa560d44ab8a6a7c6e3243ea1c9 2024-12-11T02:27:43,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/ab8fa9c5c9064f74a3812f96862af937 is 50, key is test_row_0/B:col10/1733884062976/Put/seqid=0 2024-12-11T02:27:43,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742280_1456 (size=12001) 2024-12-11T02:27:44,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:44,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884124216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:44,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:44,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884124217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:44,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:44,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884124217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:44,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:44,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884124220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:44,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:44,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884124224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:44,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/ab8fa9c5c9064f74a3812f96862af937 2024-12-11T02:27:44,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/80543e0aae7443f48bbe920fdb495f98 is 50, key is test_row_0/C:col10/1733884062976/Put/seqid=0 2024-12-11T02:27:44,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742281_1457 (size=12001) 2024-12-11T02:27:44,351 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-11T02:27:44,351 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-11T02:27:44,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/80543e0aae7443f48bbe920fdb495f98 2024-12-11T02:27:44,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/d0a83fa560d44ab8a6a7c6e3243ea1c9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0a83fa560d44ab8a6a7c6e3243ea1c9 2024-12-11T02:27:44,743 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0a83fa560d44ab8a6a7c6e3243ea1c9, entries=200, sequenceid=120, filesize=38.6 K 2024-12-11T02:27:44,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/ab8fa9c5c9064f74a3812f96862af937 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ab8fa9c5c9064f74a3812f96862af937 2024-12-11T02:27:44,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ab8fa9c5c9064f74a3812f96862af937, entries=150, sequenceid=120, filesize=11.7 K 2024-12-11T02:27:44,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/80543e0aae7443f48bbe920fdb495f98 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80543e0aae7443f48bbe920fdb495f98 2024-12-11T02:27:44,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80543e0aae7443f48bbe920fdb495f98, entries=150, sequenceid=120, filesize=11.7 K 2024-12-11T02:27:44,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for a10f50dcc5dc0cf76420942b9469ad44 in 1668ms, sequenceid=120, compaction requested=true 2024-12-11T02:27:44,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:44,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:44,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:44,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:44,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:44,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:44,753 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:44,753 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:44,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:44,754 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:44,754 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/A is initiating minor compaction (all files) 2024-12-11T02:27:44,754 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/A in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:44,754 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/03fc790764fd4bc088914847e2d42686, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/c87b8bd9653f41d68a7ae797ed614b30, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/1b2b449c29d841c582267ac47fda0f5d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0a83fa560d44ab8a6a7c6e3243ea1c9] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=129.4 K 2024-12-11T02:27:44,754 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:44,754 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/03fc790764fd4bc088914847e2d42686, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/c87b8bd9653f41d68a7ae797ed614b30, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/1b2b449c29d841c582267ac47fda0f5d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0a83fa560d44ab8a6a7c6e3243ea1c9] 2024-12-11T02:27:44,755 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:44,755 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/B is initiating minor compaction (all files) 2024-12-11T02:27:44,755 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/B in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:44,755 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03fc790764fd4bc088914847e2d42686, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733884058978 2024-12-11T02:27:44,755 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36cb6c73d06f4f67a0574704ffcf5734, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/6a9e69339a7649d2894964e7b14d60b8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/b6b13be5078a4db784ed404fb0916bfc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ab8fa9c5c9064f74a3812f96862af937] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=47.0 K 2024-12-11T02:27:44,755 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c87b8bd9653f41d68a7ae797ed614b30, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733884060133 2024-12-11T02:27:44,755 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 36cb6c73d06f4f67a0574704ffcf5734, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733884058978 2024-12-11T02:27:44,755 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b2b449c29d841c582267ac47fda0f5d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733884060792 2024-12-11T02:27:44,756 DEBUG 
[RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a9e69339a7649d2894964e7b14d60b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733884060133 2024-12-11T02:27:44,756 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0a83fa560d44ab8a6a7c6e3243ea1c9, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733884062975 2024-12-11T02:27:44,756 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b6b13be5078a4db784ed404fb0916bfc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733884060792 2024-12-11T02:27:44,756 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ab8fa9c5c9064f74a3812f96862af937, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733884062976 2024-12-11T02:27:44,762 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:44,765 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#B#compaction#391 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:44,766 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/ce601f7cfe8a442f928a604d12ac8198 is 50, key is test_row_0/B:col10/1733884062976/Put/seqid=0 2024-12-11T02:27:44,768 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211793e4efbcc9c40cfa76c7bd7988506ca_a10f50dcc5dc0cf76420942b9469ad44 store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:44,770 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211793e4efbcc9c40cfa76c7bd7988506ca_a10f50dcc5dc0cf76420942b9469ad44, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:44,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742282_1458 (size=12241) 2024-12-11T02:27:44,770 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211793e4efbcc9c40cfa76c7bd7988506ca_a10f50dcc5dc0cf76420942b9469ad44 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:44,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742283_1459 (size=4469) 2024-12-11T02:27:44,775 INFO 
[RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#A#compaction#390 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:44,775 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/14e7982660fa411cb48b3227144daa95 is 175, key is test_row_0/A:col10/1733884062976/Put/seqid=0 2024-12-11T02:27:44,776 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/ce601f7cfe8a442f928a604d12ac8198 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ce601f7cfe8a442f928a604d12ac8198 2024-12-11T02:27:44,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742284_1460 (size=31195) 2024-12-11T02:27:44,783 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/B of a10f50dcc5dc0cf76420942b9469ad44 into ce601f7cfe8a442f928a604d12ac8198(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:44,783 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:44,783 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/B, priority=12, startTime=1733884064753; duration=0sec 2024-12-11T02:27:44,783 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:44,783 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:B 2024-12-11T02:27:44,783 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:27:44,785 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:27:44,785 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/14e7982660fa411cb48b3227144daa95 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/14e7982660fa411cb48b3227144daa95 2024-12-11T02:27:44,785 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/C is initiating minor compaction (all files) 2024-12-11T02:27:44,785 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/C in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:44,785 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d7eb109c4386455d86256473f4a1e815, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/8a59f0f89f49448795b0b4288af602c8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/89f65450664f43a7acf3ef0217fcb1e7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80543e0aae7443f48bbe920fdb495f98] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=47.0 K 2024-12-11T02:27:44,786 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d7eb109c4386455d86256473f4a1e815, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733884058978 2024-12-11T02:27:44,786 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a59f0f89f49448795b0b4288af602c8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733884060133 2024-12-11T02:27:44,787 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 89f65450664f43a7acf3ef0217fcb1e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733884060792 2024-12-11T02:27:44,789 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 80543e0aae7443f48bbe920fdb495f98, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733884062976 2024-12-11T02:27:44,791 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/A of a10f50dcc5dc0cf76420942b9469ad44 into 14e7982660fa411cb48b3227144daa95(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
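The surrounding entries record a memstore flush of region a10f50dcc5dc0cf76420942b9469ad44 followed by minor compactions of its A/B/C stores, and the entries that follow show a client-requested FLUSH of TestAcidGuarantees executing as FlushTableProcedure (pid=104/106). As a rough illustration only, not taken from this test, a table flush like that can be requested through the standard HBase Admin API; connection and classpath setup are assumed, the table name is the one in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml from the classpath; cluster settings are assumed here.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush all regions of the table; on the server side this runs
      // a FlushTableProcedure like the pid=104/106 entries recorded in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}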
2024-12-11T02:27:44,791 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:44,791 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/A, priority=12, startTime=1733884064753; duration=0sec 2024-12-11T02:27:44,791 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:44,791 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:A 2024-12-11T02:27:44,798 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#C#compaction#392 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:44,799 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/ef39452c5bdd40ba82b15b5ffa5eb62d is 50, key is test_row_0/C:col10/1733884062976/Put/seqid=0 2024-12-11T02:27:44,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742285_1461 (size=12241) 2024-12-11T02:27:45,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-11T02:27:45,029 INFO [Thread-1907 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-11T02:27:45,030 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:45,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-11T02:27:45,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-11T02:27:45,032 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:45,032 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:45,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:45,133 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-11T02:27:45,184 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-11T02:27:45,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:45,185 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-11T02:27:45,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:45,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:45,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:45,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:45,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:45,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:45,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412113f453b31c0734ead8bf5a17665378111_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884063094/Put/seqid=0 2024-12-11T02:27:45,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742286_1462 (size=12204) 2024-12-11T02:27:45,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:45,201 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412113f453b31c0734ead8bf5a17665378111_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412113f453b31c0734ead8bf5a17665378111_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:45,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/0bf0c1387d3d48d5bb63190f7b6eeb24, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:45,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/0bf0c1387d3d48d5bb63190f7b6eeb24 is 175, key is test_row_0/A:col10/1733884063094/Put/seqid=0 2024-12-11T02:27:45,209 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/ef39452c5bdd40ba82b15b5ffa5eb62d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ef39452c5bdd40ba82b15b5ffa5eb62d 2024-12-11T02:27:45,215 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/C of a10f50dcc5dc0cf76420942b9469ad44 into ef39452c5bdd40ba82b15b5ffa5eb62d(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
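The repeated RegionTooBusyException warnings in this section ("Over memstore limit=512.0 K") are raised by HRegion.checkResources() while the region's memstore sits above its blocking size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below is purely illustrative: the concrete values are assumptions chosen so the product matches the 512 K limit seen in the log, not values read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: flush a region's memstore once it reaches 128 KB ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // ... and block further writes (RegionTooBusyException) at 4x that, i.e. 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore size = " + blocking + " bytes");
  }
}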
2024-12-11T02:27:45,215 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:45,215 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/C, priority=12, startTime=1733884064753; duration=0sec 2024-12-11T02:27:45,215 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:45,215 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:C 2024-12-11T02:27:45,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742287_1463 (size=31005) 2024-12-11T02:27:45,223 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/0bf0c1387d3d48d5bb63190f7b6eeb24 2024-12-11T02:27:45,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:45,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
as already flushing 2024-12-11T02:27:45,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/fdd86e9f60784c10a5b44cd8921f395d is 50, key is test_row_0/B:col10/1733884063094/Put/seqid=0 2024-12-11T02:27:45,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742288_1464 (size=12051) 2024-12-11T02:27:45,236 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/fdd86e9f60784c10a5b44cd8921f395d 2024-12-11T02:27:45,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/71a69afce7d94e19b696fecdfb5efc12 is 50, key is test_row_0/C:col10/1733884063094/Put/seqid=0 2024-12-11T02:27:45,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742289_1465 (size=12051) 2024-12-11T02:27:45,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884125289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884125289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884125290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884125291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884125295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-11T02:27:45,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884125396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884125396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884125397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884125397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884125401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884125600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884125600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884125600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884125601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884125606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-11T02:27:45,650 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/71a69afce7d94e19b696fecdfb5efc12 2024-12-11T02:27:45,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/0bf0c1387d3d48d5bb63190f7b6eeb24 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/0bf0c1387d3d48d5bb63190f7b6eeb24 2024-12-11T02:27:45,657 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/0bf0c1387d3d48d5bb63190f7b6eeb24, entries=150, sequenceid=132, filesize=30.3 K 2024-12-11T02:27:45,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/fdd86e9f60784c10a5b44cd8921f395d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fdd86e9f60784c10a5b44cd8921f395d 2024-12-11T02:27:45,661 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fdd86e9f60784c10a5b44cd8921f395d, entries=150, sequenceid=132, filesize=11.8 K 2024-12-11T02:27:45,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/71a69afce7d94e19b696fecdfb5efc12 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/71a69afce7d94e19b696fecdfb5efc12 2024-12-11T02:27:45,665 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/71a69afce7d94e19b696fecdfb5efc12, entries=150, sequenceid=132, filesize=11.8 K 2024-12-11T02:27:45,666 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for a10f50dcc5dc0cf76420942b9469ad44 in 481ms, sequenceid=132, compaction requested=false 2024-12-11T02:27:45,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:45,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:45,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-11T02:27:45,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-11T02:27:45,668 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-11T02:27:45,668 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 634 msec 2024-12-11T02:27:45,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 639 msec 2024-12-11T02:27:45,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:45,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-11T02:27:45,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:45,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:45,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:45,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:45,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:45,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:45,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d1818a636ee643959c7c48e532d35fd0_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884065289/Put/seqid=0 2024-12-11T02:27:45,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742290_1466 (size=14794) 2024-12-11T02:27:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884125913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884125913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884125914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884125916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:45,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:45,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884125920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884126022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884126022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884126023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884126027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-11T02:27:46,135 INFO [Thread-1907 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-11T02:27:46,136 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-11T02:27:46,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T02:27:46,138 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:46,138 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:46,139 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:46,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884126230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884126230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884126230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884126232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T02:27:46,290 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-11T02:27:46,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:46,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:46,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,321 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:46,325 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d1818a636ee643959c7c48e532d35fd0_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211d1818a636ee643959c7c48e532d35fd0_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:46,326 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/cf0e326c5f984a5fbcbdad51aa0e09ec, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:46,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/cf0e326c5f984a5fbcbdad51aa0e09ec is 175, key is test_row_0/A:col10/1733884065289/Put/seqid=0 2024-12-11T02:27:46,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742291_1467 (size=39749) 2024-12-11T02:27:46,331 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/cf0e326c5f984a5fbcbdad51aa0e09ec 2024-12-11T02:27:46,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/e93b921732fd4fd18159701b59eafd39 is 50, key is test_row_0/B:col10/1733884065289/Put/seqid=0 2024-12-11T02:27:46,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742292_1468 (size=12151) 2024-12-11T02:27:46,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884126425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T02:27:46,443 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-11T02:27:46,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:46,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884126534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884126535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884126536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:46,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884126539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,596 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-11T02:27:46,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:46,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T02:27:46,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/e93b921732fd4fd18159701b59eafd39 2024-12-11T02:27:46,749 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-11T02:27:46,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:46,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:46,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/ec8bf089cef54db0ae7ccc858fef6492 is 50, key is test_row_0/C:col10/1733884065289/Put/seqid=0 2024-12-11T02:27:46,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742293_1469 (size=12151) 2024-12-11T02:27:46,904 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:46,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-11T02:27:46,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:46,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:46,905 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:46,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:46,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:47,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:47,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884127040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:47,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884127041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:47,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884127043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:47,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:47,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884127044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:47,057 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:47,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-11T02:27:47,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:47,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:47,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:47,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:47,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:47,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:47,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/ec8bf089cef54db0ae7ccc858fef6492 2024-12-11T02:27:47,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/cf0e326c5f984a5fbcbdad51aa0e09ec as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/cf0e326c5f984a5fbcbdad51aa0e09ec 2024-12-11T02:27:47,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/cf0e326c5f984a5fbcbdad51aa0e09ec, entries=200, sequenceid=160, filesize=38.8 K 2024-12-11T02:27:47,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/e93b921732fd4fd18159701b59eafd39 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/e93b921732fd4fd18159701b59eafd39 2024-12-11T02:27:47,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/e93b921732fd4fd18159701b59eafd39, entries=150, sequenceid=160, filesize=11.9 K 2024-12-11T02:27:47,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/ec8bf089cef54db0ae7ccc858fef6492 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ec8bf089cef54db0ae7ccc858fef6492 2024-12-11T02:27:47,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ec8bf089cef54db0ae7ccc858fef6492, entries=150, sequenceid=160, filesize=11.9 K 2024-12-11T02:27:47,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for a10f50dcc5dc0cf76420942b9469ad44 in 1276ms, sequenceid=160, compaction requested=true 2024-12-11T02:27:47,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:47,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:47,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:47,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:47,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:47,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:47,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:47,184 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:47,184 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:47,185 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101949 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:47,185 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:47,185 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/A is initiating minor compaction (all files) 2024-12-11T02:27:47,185 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/B is initiating minor compaction (all files) 2024-12-11T02:27:47,185 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/A in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:47,185 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/B in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:47,185 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/14e7982660fa411cb48b3227144daa95, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/0bf0c1387d3d48d5bb63190f7b6eeb24, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/cf0e326c5f984a5fbcbdad51aa0e09ec] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=99.6 K 2024-12-11T02:27:47,185 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:47,185 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ce601f7cfe8a442f928a604d12ac8198, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fdd86e9f60784c10a5b44cd8921f395d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/e93b921732fd4fd18159701b59eafd39] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=35.6 K 2024-12-11T02:27:47,185 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/14e7982660fa411cb48b3227144daa95, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/0bf0c1387d3d48d5bb63190f7b6eeb24, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/cf0e326c5f984a5fbcbdad51aa0e09ec] 2024-12-11T02:27:47,185 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14e7982660fa411cb48b3227144daa95, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733884062976 2024-12-11T02:27:47,185 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ce601f7cfe8a442f928a604d12ac8198, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733884062976 2024-12-11T02:27:47,186 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bf0c1387d3d48d5bb63190f7b6eeb24, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733884063091 2024-12-11T02:27:47,186 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting fdd86e9f60784c10a5b44cd8921f395d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733884063091 2024-12-11T02:27:47,186 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf0e326c5f984a5fbcbdad51aa0e09ec, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733884065289 2024-12-11T02:27:47,186 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e93b921732fd4fd18159701b59eafd39, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733884065289 2024-12-11T02:27:47,192 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:47,194 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#B#compaction#400 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:47,195 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/58853a6e8d58486da0bdb10a05fa00a9 is 50, key is test_row_0/B:col10/1733884065289/Put/seqid=0 2024-12-11T02:27:47,195 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121136e454f910fa46b4a6495eea640aa7a6_a10f50dcc5dc0cf76420942b9469ad44 store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:47,196 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121136e454f910fa46b4a6495eea640aa7a6_a10f50dcc5dc0cf76420942b9469ad44, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:47,196 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121136e454f910fa46b4a6495eea640aa7a6_a10f50dcc5dc0cf76420942b9469ad44 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:47,209 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:47,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-11T02:27:47,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:47,210 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-11T02:27:47,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:47,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742294_1470 (size=12493) 2024-12-11T02:27:47,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:47,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:47,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:47,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:47,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:47,215 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/58853a6e8d58486da0bdb10a05fa00a9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/58853a6e8d58486da0bdb10a05fa00a9 2024-12-11T02:27:47,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111385f0e06884436394a028ee0253d42e_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884065915/Put/seqid=0 2024-12-11T02:27:47,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742295_1471 (size=4469) 2024-12-11T02:27:47,226 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#A#compaction#399 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:47,227 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/9357510f961643939bf726ac30feee55 is 175, key is test_row_0/A:col10/1733884065289/Put/seqid=0 2024-12-11T02:27:47,227 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/B of a10f50dcc5dc0cf76420942b9469ad44 into 58853a6e8d58486da0bdb10a05fa00a9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:47,227 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:47,227 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/B, priority=13, startTime=1733884067184; duration=0sec 2024-12-11T02:27:47,227 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:47,227 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:B 2024-12-11T02:27:47,227 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:47,229 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:47,229 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/C is initiating minor compaction (all files) 2024-12-11T02:27:47,229 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/C in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:47,229 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ef39452c5bdd40ba82b15b5ffa5eb62d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/71a69afce7d94e19b696fecdfb5efc12, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ec8bf089cef54db0ae7ccc858fef6492] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=35.6 K 2024-12-11T02:27:47,230 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ef39452c5bdd40ba82b15b5ffa5eb62d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733884062976 2024-12-11T02:27:47,230 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 71a69afce7d94e19b696fecdfb5efc12, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733884063091 2024-12-11T02:27:47,230 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ec8bf089cef54db0ae7ccc858fef6492, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733884065289 2024-12-11T02:27:47,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742296_1472 (size=12304) 2024-12-11T02:27:47,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:47,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T02:27:47,241 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111385f0e06884436394a028ee0253d42e_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111385f0e06884436394a028ee0253d42e_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:47,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/4add79e785e24201bf38dde195e0168a, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:47,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/4add79e785e24201bf38dde195e0168a is 175, key is test_row_0/A:col10/1733884065915/Put/seqid=0 2024-12-11T02:27:47,246 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#C#compaction#402 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:47,246 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/f83306550e9f475088970e06e2ff2d09 is 50, key is test_row_0/C:col10/1733884065289/Put/seqid=0 2024-12-11T02:27:47,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742297_1473 (size=31447) 2024-12-11T02:27:47,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742298_1474 (size=31105) 2024-12-11T02:27:47,268 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/9357510f961643939bf726ac30feee55 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/9357510f961643939bf726ac30feee55 2024-12-11T02:27:47,272 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/A of a10f50dcc5dc0cf76420942b9469ad44 into 9357510f961643939bf726ac30feee55(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:47,272 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:47,272 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/A, priority=13, startTime=1733884067184; duration=0sec 2024-12-11T02:27:47,273 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:47,273 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:A 2024-12-11T02:27:47,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742299_1475 (size=12493) 2024-12-11T02:27:47,280 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/f83306550e9f475088970e06e2ff2d09 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/f83306550e9f475088970e06e2ff2d09 2024-12-11T02:27:47,285 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/C of a10f50dcc5dc0cf76420942b9469ad44 into f83306550e9f475088970e06e2ff2d09(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:47,285 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:47,285 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/C, priority=13, startTime=1733884067184; duration=0sec 2024-12-11T02:27:47,285 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:47,285 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:C 2024-12-11T02:27:47,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:47,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:47,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:47,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884127518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:47,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:47,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884127621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:47,666 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/4add79e785e24201bf38dde195e0168a 2024-12-11T02:27:47,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/a17d215fc60f4980ac5726e781bd2143 is 50, key is test_row_0/B:col10/1733884065915/Put/seqid=0 2024-12-11T02:27:47,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742300_1476 (size=12151) 2024-12-11T02:27:47,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:47,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884127826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:48,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:48,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884128045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:48,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:48,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884128060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:48,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:48,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:48,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884128060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:48,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884128060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:48,077 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/a17d215fc60f4980ac5726e781bd2143 2024-12-11T02:27:48,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/d63f68bd16cb4954b1c3943136ab547a is 50, key is test_row_0/C:col10/1733884065915/Put/seqid=0 2024-12-11T02:27:48,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742301_1477 (size=12151) 2024-12-11T02:27:48,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:48,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884128130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:48,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T02:27:48,487 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/d63f68bd16cb4954b1c3943136ab547a 2024-12-11T02:27:48,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/4add79e785e24201bf38dde195e0168a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/4add79e785e24201bf38dde195e0168a 2024-12-11T02:27:48,495 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/4add79e785e24201bf38dde195e0168a, entries=150, sequenceid=169, filesize=30.4 K 2024-12-11T02:27:48,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/a17d215fc60f4980ac5726e781bd2143 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a17d215fc60f4980ac5726e781bd2143 2024-12-11T02:27:48,498 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a17d215fc60f4980ac5726e781bd2143, entries=150, sequenceid=169, filesize=11.9 K 2024-12-11T02:27:48,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/d63f68bd16cb4954b1c3943136ab547a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d63f68bd16cb4954b1c3943136ab547a 2024-12-11T02:27:48,502 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d63f68bd16cb4954b1c3943136ab547a, entries=150, sequenceid=169, filesize=11.9 K 2024-12-11T02:27:48,503 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for a10f50dcc5dc0cf76420942b9469ad44 in 1293ms, sequenceid=169, compaction requested=false 2024-12-11T02:27:48,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:48,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:48,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-11T02:27:48,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-11T02:27:48,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-11T02:27:48,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3650 sec 2024-12-11T02:27:48,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.3700 sec 2024-12-11T02:27:48,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:48,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-11T02:27:48,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:48,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:48,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:48,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:48,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:48,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:48,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121178e0b7ec966a417992cf46797dfd428b_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884067491/Put/seqid=0 2024-12-11T02:27:48,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742302_1478 (size=14794) 2024-12-11T02:27:48,653 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:48,657 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121178e0b7ec966a417992cf46797dfd428b_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121178e0b7ec966a417992cf46797dfd428b_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:48,658 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/dfbf356d2b7b479c813e316d4b4ad4a9, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:48,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/dfbf356d2b7b479c813e316d4b4ad4a9 is 175, key is test_row_0/A:col10/1733884067491/Put/seqid=0 2024-12-11T02:27:48,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742303_1479 (size=39749) 2024-12-11T02:27:48,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:48,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884128667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:48,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884128774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:48,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:48,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884128976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:49,064 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/dfbf356d2b7b479c813e316d4b4ad4a9 2024-12-11T02:27:49,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/79d372a590ba410a9200e5e6def14fb7 is 50, key is test_row_0/B:col10/1733884067491/Put/seqid=0 2024-12-11T02:27:49,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742304_1480 (size=12151) 2024-12-11T02:27:49,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:49,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884129282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:49,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/79d372a590ba410a9200e5e6def14fb7 2024-12-11T02:27:49,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/abb2fe4094c041478c735c0c10239365 is 50, key is test_row_0/C:col10/1733884067491/Put/seqid=0 2024-12-11T02:27:49,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742305_1481 (size=12151) 2024-12-11T02:27:49,493 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/abb2fe4094c041478c735c0c10239365 2024-12-11T02:27:49,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/dfbf356d2b7b479c813e316d4b4ad4a9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/dfbf356d2b7b479c813e316d4b4ad4a9 2024-12-11T02:27:49,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/dfbf356d2b7b479c813e316d4b4ad4a9, entries=200, sequenceid=200, filesize=38.8 K 2024-12-11T02:27:49,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/79d372a590ba410a9200e5e6def14fb7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/79d372a590ba410a9200e5e6def14fb7 2024-12-11T02:27:49,504 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/79d372a590ba410a9200e5e6def14fb7, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T02:27:49,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/abb2fe4094c041478c735c0c10239365 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/abb2fe4094c041478c735c0c10239365 2024-12-11T02:27:49,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/abb2fe4094c041478c735c0c10239365, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T02:27:49,509 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for a10f50dcc5dc0cf76420942b9469ad44 in 871ms, sequenceid=200, compaction requested=true 2024-12-11T02:27:49,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:49,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:49,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:49,509 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:49,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:49,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:49,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:49,509 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:49,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:49,510 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:49,510 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:49,510 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/A is initiating minor compaction (all files) 2024-12-11T02:27:49,510 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/B is initiating minor compaction (all files) 2024-12-11T02:27:49,510 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/A in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:49,510 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/B in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:49,511 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/9357510f961643939bf726ac30feee55, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/4add79e785e24201bf38dde195e0168a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/dfbf356d2b7b479c813e316d4b4ad4a9] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=99.9 K 2024-12-11T02:27:49,511 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/58853a6e8d58486da0bdb10a05fa00a9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a17d215fc60f4980ac5726e781bd2143, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/79d372a590ba410a9200e5e6def14fb7] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=35.9 K 2024-12-11T02:27:49,511 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:49,511 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/9357510f961643939bf726ac30feee55, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/4add79e785e24201bf38dde195e0168a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/dfbf356d2b7b479c813e316d4b4ad4a9] 2024-12-11T02:27:49,517 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 58853a6e8d58486da0bdb10a05fa00a9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733884065289 2024-12-11T02:27:49,517 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9357510f961643939bf726ac30feee55, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733884065289 2024-12-11T02:27:49,528 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a17d215fc60f4980ac5726e781bd2143, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733884065913 2024-12-11T02:27:49,528 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4add79e785e24201bf38dde195e0168a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733884065913 2024-12-11T02:27:49,529 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 79d372a590ba410a9200e5e6def14fb7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733884067491 2024-12-11T02:27:49,529 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfbf356d2b7b479c813e316d4b4ad4a9, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733884067491 2024-12-11T02:27:49,537 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:49,539 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#B#compaction#408 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:49,540 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/36a32ed7790d4fe89147b23ad2214042 is 50, key is test_row_0/B:col10/1733884067491/Put/seqid=0 2024-12-11T02:27:49,541 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211a7350f75b1c143b3945b4e4ae7664264_a10f50dcc5dc0cf76420942b9469ad44 store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:49,544 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211a7350f75b1c143b3945b4e4ae7664264_a10f50dcc5dc0cf76420942b9469ad44, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:49,544 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211a7350f75b1c143b3945b4e4ae7664264_a10f50dcc5dc0cf76420942b9469ad44 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:49,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742306_1482 (size=12595) 2024-12-11T02:27:49,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742307_1483 (size=4469) 2024-12-11T02:27:49,566 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#A#compaction#409 average throughput is 0.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:49,567 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/d0689af15c714ee3b0bb6e602ac9a1df is 175, key is test_row_0/A:col10/1733884067491/Put/seqid=0 2024-12-11T02:27:49,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742308_1484 (size=31549) 2024-12-11T02:27:49,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:49,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:27:49,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:49,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:49,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:49,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:49,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:49,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:49,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211241cb0dce1c44e66a3be4ebae44f7682_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884069791/Put/seqid=0 2024-12-11T02:27:49,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742309_1485 (size=14794) 2024-12-11T02:27:49,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884129924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:49,962 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/36a32ed7790d4fe89147b23ad2214042 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36a32ed7790d4fe89147b23ad2214042 2024-12-11T02:27:49,966 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/B of a10f50dcc5dc0cf76420942b9469ad44 into 36a32ed7790d4fe89147b23ad2214042(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:49,966 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:49,966 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/B, priority=13, startTime=1733884069509; duration=0sec 2024-12-11T02:27:49,966 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:49,966 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:B 2024-12-11T02:27:49,966 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:49,967 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:49,967 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/C is initiating minor compaction (all files) 2024-12-11T02:27:49,967 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/C in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:49,967 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/f83306550e9f475088970e06e2ff2d09, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d63f68bd16cb4954b1c3943136ab547a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/abb2fe4094c041478c735c0c10239365] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=35.9 K 2024-12-11T02:27:49,967 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f83306550e9f475088970e06e2ff2d09, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733884065289 2024-12-11T02:27:49,968 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d63f68bd16cb4954b1c3943136ab547a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733884065913 2024-12-11T02:27:49,968 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting abb2fe4094c041478c735c0c10239365, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733884067491 2024-12-11T02:27:49,974 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
a10f50dcc5dc0cf76420942b9469ad44#C#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:49,975 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/864fe20712374762a9abbe10afaafb3f is 50, key is test_row_0/C:col10/1733884067491/Put/seqid=0 2024-12-11T02:27:49,978 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/d0689af15c714ee3b0bb6e602ac9a1df as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0689af15c714ee3b0bb6e602ac9a1df 2024-12-11T02:27:49,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742310_1486 (size=12595) 2024-12-11T02:27:49,983 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/A of a10f50dcc5dc0cf76420942b9469ad44 into d0689af15c714ee3b0bb6e602ac9a1df(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:49,983 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:49,983 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/A, priority=13, startTime=1733884069509; duration=0sec 2024-12-11T02:27:49,983 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:49,983 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:A 2024-12-11T02:27:49,987 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/864fe20712374762a9abbe10afaafb3f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/864fe20712374762a9abbe10afaafb3f 2024-12-11T02:27:49,991 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/C of a10f50dcc5dc0cf76420942b9469ad44 into 864fe20712374762a9abbe10afaafb3f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:27:49,991 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:49,991 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/C, priority=13, startTime=1733884069509; duration=0sec 2024-12-11T02:27:49,991 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:49,991 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:C 2024-12-11T02:27:50,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:50,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884130033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:50,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:50,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884130068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:50,072 DEBUG [Thread-1903 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:27:50,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:50,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884130073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:50,077 DEBUG [Thread-1897 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:27:50,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:50,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884130081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:50,084 DEBUG [Thread-1901 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:27:50,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:50,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884130083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:50,087 DEBUG [Thread-1905 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:27:50,214 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:50,219 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211241cb0dce1c44e66a3be4ebae44f7682_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211241cb0dce1c44e66a3be4ebae44f7682_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:50,220 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/6514ef1083584873a32638fccba204b1, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:50,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/6514ef1083584873a32638fccba204b1 is 175, key is test_row_0/A:col10/1733884069791/Put/seqid=0 2024-12-11T02:27:50,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742311_1487 (size=39749) 2024-12-11T02:27:50,225 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/6514ef1083584873a32638fccba204b1 2024-12-11T02:27:50,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/d2a7d7460601410b957ba4ba68e8cc7f is 50, key is test_row_0/B:col10/1733884069791/Put/seqid=0 2024-12-11T02:27:50,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742312_1488 (size=12151) 2024-12-11T02:27:50,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/d2a7d7460601410b957ba4ba68e8cc7f 2024-12-11T02:27:50,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:50,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884130235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:50,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T02:27:50,243 INFO [Thread-1907 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-11T02:27:50,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:50,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-11T02:27:50,246 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:50,247 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:50,247 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:50,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-11T02:27:50,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/72477d41e9b44f99ab5167bf8f2c1d2a is 50, key is test_row_0/C:col10/1733884069791/Put/seqid=0 2024-12-11T02:27:50,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742313_1489 (size=12151) 2024-12-11T02:27:50,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/72477d41e9b44f99ab5167bf8f2c1d2a 2024-12-11T02:27:50,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/6514ef1083584873a32638fccba204b1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/6514ef1083584873a32638fccba204b1 2024-12-11T02:27:50,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/6514ef1083584873a32638fccba204b1, entries=200, sequenceid=211, filesize=38.8 K 2024-12-11T02:27:50,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/d2a7d7460601410b957ba4ba68e8cc7f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/d2a7d7460601410b957ba4ba68e8cc7f 2024-12-11T02:27:50,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/d2a7d7460601410b957ba4ba68e8cc7f, entries=150, sequenceid=211, filesize=11.9 K 2024-12-11T02:27:50,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/72477d41e9b44f99ab5167bf8f2c1d2a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/72477d41e9b44f99ab5167bf8f2c1d2a 2024-12-11T02:27:50,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/72477d41e9b44f99ab5167bf8f2c1d2a, entries=150, sequenceid=211, filesize=11.9 K 2024-12-11T02:27:50,272 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for a10f50dcc5dc0cf76420942b9469ad44 in 469ms, sequenceid=211, compaction requested=false 2024-12-11T02:27:50,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:50,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-11T02:27:50,399 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:50,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-11T02:27:50,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:50,400 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:27:50,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:50,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:50,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:50,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:50,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:50,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:50,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211a2fe238b184744cfbcef2c2c1ece092a_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884069917/Put/seqid=0 2024-12-11T02:27:50,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742314_1490 (size=12304) 2024-12-11T02:27:50,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:50,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
as already flushing 2024-12-11T02:27:50,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-11T02:27:50,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:50,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884130577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:50,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:50,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884130682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:50,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:50,821 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211a2fe238b184744cfbcef2c2c1ece092a_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211a2fe238b184744cfbcef2c2c1ece092a_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:50,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/5a9bf579101c47a7a16b6ffc34258c81, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:50,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/5a9bf579101c47a7a16b6ffc34258c81 is 175, key is test_row_0/A:col10/1733884069917/Put/seqid=0 2024-12-11T02:27:50,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742315_1491 (size=31105) 2024-12-11T02:27:50,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-11T02:27:50,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:50,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884130886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:51,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:51,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884131191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:51,249 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/5a9bf579101c47a7a16b6ffc34258c81 2024-12-11T02:27:51,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/a28040049e7b454e89c168736cd5869a is 50, key is test_row_0/B:col10/1733884069917/Put/seqid=0 2024-12-11T02:27:51,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742316_1492 (size=12151) 2024-12-11T02:27:51,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-11T02:27:51,662 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/a28040049e7b454e89c168736cd5869a 2024-12-11T02:27:51,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/eb085cd62fb04775ad47adb504f9010f is 50, key is test_row_0/C:col10/1733884069917/Put/seqid=0 2024-12-11T02:27:51,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742317_1493 (size=12151) 2024-12-11T02:27:51,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:51,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884131696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:52,073 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/eb085cd62fb04775ad47adb504f9010f 2024-12-11T02:27:52,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/5a9bf579101c47a7a16b6ffc34258c81 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/5a9bf579101c47a7a16b6ffc34258c81 2024-12-11T02:27:52,081 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/5a9bf579101c47a7a16b6ffc34258c81, entries=150, sequenceid=239, filesize=30.4 K 2024-12-11T02:27:52,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/a28040049e7b454e89c168736cd5869a as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a28040049e7b454e89c168736cd5869a 2024-12-11T02:27:52,085 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a28040049e7b454e89c168736cd5869a, entries=150, sequenceid=239, filesize=11.9 K 2024-12-11T02:27:52,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/eb085cd62fb04775ad47adb504f9010f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/eb085cd62fb04775ad47adb504f9010f 2024-12-11T02:27:52,089 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/eb085cd62fb04775ad47adb504f9010f, entries=150, sequenceid=239, filesize=11.9 K 2024-12-11T02:27:52,090 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a10f50dcc5dc0cf76420942b9469ad44 in 1690ms, sequenceid=239, compaction requested=true 2024-12-11T02:27:52,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:52,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
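The "Over memstore limit=512.0 K" entries above come from HRegion.checkResources(): once a region's memstore crosses its blocking size (hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, tuned very small in this test), new writes are rejected with RegionTooBusyException and the client's RpcRetryingCallerImpl backs off and retries, which is what the "tries=6, retries=16" messages record. Below is a minimal sketch, assuming the standard HBase 2.x client API, of a client issuing the same kind of put with the retry knobs set explicitly; the table, row, family and qualifier are taken from the log, but the class, retry values and error handling are illustrative assumptions, not code from the test itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureWriter {          // hypothetical class, for illustration only
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry knobs consulted by RpcRetryingCallerImpl;
        // the values here are illustrative, not the ones the test uses.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setInt("hbase.client.pause", 100); // base pause in ms between retries
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            try {
                // Retries internally while the region is over its memstore blocking limit.
                table.put(put);
            } catch (IOException e) {
                // If every retry fails, the RegionTooBusyException surfaces here
                // (possibly wrapped); the caller should back off and try again later.
                System.err.println("put failed after retries: " + e.getMessage());
            }
        }
    }
}

The server side relieves this pressure through exactly the flushes logged above: once the MemStoreFlusher (or a FlushRegionProcedure) writes the stores out, checkResources() stops rejecting writes until the memstore fills again.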
2024-12-11T02:27:52,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-11T02:27:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-11T02:27:52,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-11T02:27:52,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8440 sec 2024-12-11T02:27:52,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.8490 sec 2024-12-11T02:27:52,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-11T02:27:52,352 INFO [Thread-1907 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-11T02:27:52,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:52,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-11T02:27:52,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-11T02:27:52,355 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:52,355 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:52,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:52,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-11T02:27:52,507 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:52,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-11T02:27:52,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:52,508 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:27:52,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:52,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:52,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:52,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:52,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:52,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:52,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118ad90f53c2224961b1f08d0b86964f73_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884070571/Put/seqid=0 2024-12-11T02:27:52,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742318_1494 (size=12304) 2024-12-11T02:27:52,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:52,522 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118ad90f53c2224961b1f08d0b86964f73_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118ad90f53c2224961b1f08d0b86964f73_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:52,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/339c6b707d6e471a96a76f416c2bc150, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:52,523 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/339c6b707d6e471a96a76f416c2bc150 is 175, key is test_row_0/A:col10/1733884070571/Put/seqid=0 2024-12-11T02:27:52,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742319_1495 (size=31105) 2024-12-11T02:27:52,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-11T02:27:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:52,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:52,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:52,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884132841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:52,929 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/339c6b707d6e471a96a76f416c2bc150 2024-12-11T02:27:52,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/f41c70f247da4a62963fec5d688ebeeb is 50, key is test_row_0/B:col10/1733884070571/Put/seqid=0 2024-12-11T02:27:52,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742320_1496 (size=12151) 2024-12-11T02:27:52,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:52,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884132947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:52,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-11T02:27:53,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:53,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884133151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:53,340 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/f41c70f247da4a62963fec5d688ebeeb 2024-12-11T02:27:53,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/6613e9121365457ebb3074a9781787e6 is 50, key is test_row_0/C:col10/1733884070571/Put/seqid=0 2024-12-11T02:27:53,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742321_1497 (size=12151) 2024-12-11T02:27:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-11T02:27:53,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884133457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:53,752 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/6613e9121365457ebb3074a9781787e6 2024-12-11T02:27:53,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/339c6b707d6e471a96a76f416c2bc150 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/339c6b707d6e471a96a76f416c2bc150 2024-12-11T02:27:53,759 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/339c6b707d6e471a96a76f416c2bc150, entries=150, sequenceid=250, filesize=30.4 K 2024-12-11T02:27:53,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/f41c70f247da4a62963fec5d688ebeeb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/f41c70f247da4a62963fec5d688ebeeb 2024-12-11T02:27:53,764 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/f41c70f247da4a62963fec5d688ebeeb, entries=150, sequenceid=250, filesize=11.9 K 2024-12-11T02:27:53,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/6613e9121365457ebb3074a9781787e6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6613e9121365457ebb3074a9781787e6 2024-12-11T02:27:53,768 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6613e9121365457ebb3074a9781787e6, entries=150, sequenceid=250, filesize=11.9 K 2024-12-11T02:27:53,768 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for a10f50dcc5dc0cf76420942b9469ad44 in 1260ms, sequenceid=250, compaction requested=true 2024-12-11T02:27:53,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:53,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:53,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-11T02:27:53,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-11T02:27:53,771 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-11T02:27:53,771 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4150 sec 2024-12-11T02:27:53,773 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.4190 sec 2024-12-11T02:27:53,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:53,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T02:27:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:53,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118ab82155bf7d45a1989a66a0c17583b9_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884072830/Put/seqid=0 2024-12-11T02:27:53,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742322_1498 (size=14994) 2024-12-11T02:27:54,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:54,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884134010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:54,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53990 deadline: 1733884134074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,080 DEBUG [Thread-1903 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:27:54,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:54,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53976 deadline: 1733884134088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:54,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53950 deadline: 1733884134088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,091 DEBUG [Thread-1901 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:27:54,091 DEBUG [Thread-1905 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:27:54,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:54,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54008 deadline: 1733884134101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,103 DEBUG [Thread-1897 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8189 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:27:54,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:54,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884134118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:54,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884134321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,380 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:54,383 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118ab82155bf7d45a1989a66a0c17583b9_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118ab82155bf7d45a1989a66a0c17583b9_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:54,384 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/382c685c24f7468ea59c8161b6857d28, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:54,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/382c685c24f7468ea59c8161b6857d28 is 175, key is test_row_0/A:col10/1733884072830/Put/seqid=0 2024-12-11T02:27:54,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742323_1499 (size=39949) 2024-12-11T02:27:54,389 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/382c685c24f7468ea59c8161b6857d28 2024-12-11T02:27:54,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/01e01165364e469998ac3413713fce8b is 50, key is test_row_0/B:col10/1733884072830/Put/seqid=0 2024-12-11T02:27:54,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742324_1500 
(size=12301) 2024-12-11T02:27:54,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-11T02:27:54,459 INFO [Thread-1907 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-11T02:27:54,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:27:54,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-11T02:27:54,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-11T02:27:54,463 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:27:54,464 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:27:54,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:27:54,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-11T02:27:54,615 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-11T02:27:54,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:54,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:54,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:54,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:54,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:54,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:54,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:54,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884134628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-11T02:27:54,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-11T02:27:54,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:54,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:54,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:54,769 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:54,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:54,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:54,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/01e01165364e469998ac3413713fce8b 2024-12-11T02:27:54,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/10cf62ea6bf44183a57dfd5feec0a7fe is 50, key is test_row_0/C:col10/1733884072830/Put/seqid=0 2024-12-11T02:27:54,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742325_1501 (size=12301) 2024-12-11T02:27:54,921 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:54,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-11T02:27:54,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:54,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:54,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:54,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:27:54,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:54,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:55,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-11T02:27:55,074 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:55,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-11T02:27:55,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:55,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. as already flushing 2024-12-11T02:27:55,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:55,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:55,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:55,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:55,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:55,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884135137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:55,222 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/10cf62ea6bf44183a57dfd5feec0a7fe 2024-12-11T02:27:55,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/382c685c24f7468ea59c8161b6857d28 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/382c685c24f7468ea59c8161b6857d28 2024-12-11T02:27:55,227 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:55,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-11T02:27:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
as already flushing 2024-12-11T02:27:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:55,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:55,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:27:55,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/382c685c24f7468ea59c8161b6857d28, entries=200, sequenceid=276, filesize=39.0 K 2024-12-11T02:27:55,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/01e01165364e469998ac3413713fce8b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/01e01165364e469998ac3413713fce8b 2024-12-11T02:27:55,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/01e01165364e469998ac3413713fce8b, entries=150, sequenceid=276, filesize=12.0 K 2024-12-11T02:27:55,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/10cf62ea6bf44183a57dfd5feec0a7fe as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/10cf62ea6bf44183a57dfd5feec0a7fe 2024-12-11T02:27:55,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/10cf62ea6bf44183a57dfd5feec0a7fe, entries=150, sequenceid=276, filesize=12.0 K 2024-12-11T02:27:55,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for a10f50dcc5dc0cf76420942b9469ad44 in 1273ms, sequenceid=276, compaction requested=true 2024-12-11T02:27:55,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:55,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:55,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:55,242 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:27:55,242 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:27:55,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:55,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:55,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:55,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:55,243 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 173457 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:27:55,243 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/A is initiating minor compaction (all files) 2024-12-11T02:27:55,243 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61349 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:27:55,243 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/A in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:27:55,243 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/B is initiating minor compaction (all files) 2024-12-11T02:27:55,243 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/B in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:55,243 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0689af15c714ee3b0bb6e602ac9a1df, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/6514ef1083584873a32638fccba204b1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/5a9bf579101c47a7a16b6ffc34258c81, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/339c6b707d6e471a96a76f416c2bc150, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/382c685c24f7468ea59c8161b6857d28] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=169.4 K 2024-12-11T02:27:55,243 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36a32ed7790d4fe89147b23ad2214042, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/d2a7d7460601410b957ba4ba68e8cc7f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a28040049e7b454e89c168736cd5869a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/f41c70f247da4a62963fec5d688ebeeb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/01e01165364e469998ac3413713fce8b] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=59.9 K 2024-12-11T02:27:55,243 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:55,243 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0689af15c714ee3b0bb6e602ac9a1df, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/6514ef1083584873a32638fccba204b1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/5a9bf579101c47a7a16b6ffc34258c81, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/339c6b707d6e471a96a76f416c2bc150, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/382c685c24f7468ea59c8161b6857d28] 2024-12-11T02:27:55,244 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 36a32ed7790d4fe89147b23ad2214042, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733884067491 2024-12-11T02:27:55,244 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0689af15c714ee3b0bb6e602ac9a1df, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733884067491 2024-12-11T02:27:55,244 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d2a7d7460601410b957ba4ba68e8cc7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733884068666 2024-12-11T02:27:55,244 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6514ef1083584873a32638fccba204b1, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733884068666 2024-12-11T02:27:55,244 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a28040049e7b454e89c168736cd5869a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733884069913 2024-12-11T02:27:55,244 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a9bf579101c47a7a16b6ffc34258c81, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733884069913 2024-12-11T02:27:55,244 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 339c6b707d6e471a96a76f416c2bc150, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733884070568 2024-12-11T02:27:55,245 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f41c70f247da4a62963fec5d688ebeeb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733884070568 2024-12-11T02:27:55,245 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 382c685c24f7468ea59c8161b6857d28, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733884072767 2024-12-11T02:27:55,245 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 01e01165364e469998ac3413713fce8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, 
earliestPutTs=1733884072767 2024-12-11T02:27:55,253 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:55,255 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#B#compaction#423 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:55,255 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/aacbfdb4fb9d4ac992725049020db66a is 50, key is test_row_0/B:col10/1733884072830/Put/seqid=0 2024-12-11T02:27:55,257 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121176106ba574664d5e8a45c040be6a2699_a10f50dcc5dc0cf76420942b9469ad44 store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:55,259 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121176106ba574664d5e8a45c040be6a2699_a10f50dcc5dc0cf76420942b9469ad44, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:55,259 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121176106ba574664d5e8a45c040be6a2699_a10f50dcc5dc0cf76420942b9469ad44 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:55,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742326_1502 (size=12915) 2024-12-11T02:27:55,268 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/aacbfdb4fb9d4ac992725049020db66a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/aacbfdb4fb9d4ac992725049020db66a 2024-12-11T02:27:55,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742327_1503 (size=4469) 2024-12-11T02:27:55,270 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#A#compaction#424 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:55,271 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/b442e2fc3fe74d82810f16511ccf7a27 is 175, key is test_row_0/A:col10/1733884072830/Put/seqid=0 2024-12-11T02:27:55,274 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/B of a10f50dcc5dc0cf76420942b9469ad44 into aacbfdb4fb9d4ac992725049020db66a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:55,274 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:55,274 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/B, priority=11, startTime=1733884075242; duration=0sec 2024-12-11T02:27:55,274 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:55,274 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:B 2024-12-11T02:27:55,274 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:27:55,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742328_1504 (size=31869) 2024-12-11T02:27:55,282 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61349 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:27:55,282 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/C is initiating minor compaction (all files) 2024-12-11T02:27:55,282 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/C in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
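Editor's note: the ExploringCompactionPolicy lines above report that 5 store files totalling 61349 bytes were selected after considering 6 permutations "with 6 in ratio". Below is a simplified, illustrative sketch of the size-ratio test this kind of selection is based on; it is not the HBase implementation itself, the 1.2 ratio is only the commonly cited default, and the file sizes are the per-file C-store sizes (in KB) reported in the log.

```java
// Simplified sketch of the size-ratio check behind "exploring" compaction selection:
// a candidate window is acceptable when no single file is larger than
// ratio * (combined size of the other files in the window).
public final class RatioCheckSketch {
  static boolean withinRatio(double[] sizes, double ratio) {
    double total = 0;
    for (double s : sizes) {
      total += s;
    }
    for (double s : sizes) {
      if (s > ratio * (total - s)) {
        return false; // one file dominates this window; skip the permutation
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Per-file sizes (KB) of the five C-store files listed in the log above.
    double[] cStoreKb = {12.3, 11.9, 11.9, 11.9, 12.0};
    System.out.println("eligible window: " + withinRatio(cStoreKb, 1.2)); // prints true
  }
}
```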
2024-12-11T02:27:55,282 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/864fe20712374762a9abbe10afaafb3f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/72477d41e9b44f99ab5167bf8f2c1d2a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/eb085cd62fb04775ad47adb504f9010f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6613e9121365457ebb3074a9781787e6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/10cf62ea6bf44183a57dfd5feec0a7fe] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=59.9 K 2024-12-11T02:27:55,283 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 864fe20712374762a9abbe10afaafb3f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733884067491 2024-12-11T02:27:55,283 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 72477d41e9b44f99ab5167bf8f2c1d2a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733884068666 2024-12-11T02:27:55,283 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting eb085cd62fb04775ad47adb504f9010f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733884069913 2024-12-11T02:27:55,284 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6613e9121365457ebb3074a9781787e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733884070568 2024-12-11T02:27:55,284 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 10cf62ea6bf44183a57dfd5feec0a7fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733884072767 2024-12-11T02:27:55,292 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#C#compaction#425 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:55,293 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/80838f34036541f496148630a909ca74 is 50, key is test_row_0/C:col10/1733884072830/Put/seqid=0 2024-12-11T02:27:55,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742329_1505 (size=12915) 2024-12-11T02:27:55,380 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:27:55,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-11T02:27:55,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:55,381 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-11T02:27:55,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:55,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:55,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:55,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:55,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:55,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:55,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c9378334d90a48a19b337c4554404ad7_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884073988/Put/seqid=0 2024-12-11T02:27:55,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742330_1506 (size=12454) 2024-12-11T02:27:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=114 2024-12-11T02:27:55,680 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/b442e2fc3fe74d82810f16511ccf7a27 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/b442e2fc3fe74d82810f16511ccf7a27 2024-12-11T02:27:55,684 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/A of a10f50dcc5dc0cf76420942b9469ad44 into b442e2fc3fe74d82810f16511ccf7a27(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:55,684 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:55,685 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/A, priority=11, startTime=1733884075242; duration=0sec 2024-12-11T02:27:55,685 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:55,685 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:A 2024-12-11T02:27:55,703 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/80838f34036541f496148630a909ca74 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80838f34036541f496148630a909ca74 2024-12-11T02:27:55,708 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/C of a10f50dcc5dc0cf76420942b9469ad44 into 80838f34036541f496148630a909ca74(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
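Editor's note: the pid=114/pid=115 pair being polled here is a master-side FlushTableProcedure and its per-region FlushRegionCallable, the path taken when a client requests a table flush through the Admin API and waits for the procedure to finish (the completion is logged later as "Operation: FLUSH ... procId: 114 completed"). A minimal sketch, assuming the ZooKeeper endpoint 127.0.0.1:63149 that the worker threads in this run connect to:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // endpoint seen in this run
    conf.setInt("hbase.zookeeper.property.clientPort", 63149); // port seen in this run
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master and waits for it to finish,
      // which is what drives the "Checking to see if procedure is done pid=..." polling.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```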
2024-12-11T02:27:55,708 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:55,708 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/C, priority=11, startTime=1733884075242; duration=0sec 2024-12-11T02:27:55,708 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:55,708 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:C 2024-12-11T02:27:55,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:55,797 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c9378334d90a48a19b337c4554404ad7_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c9378334d90a48a19b337c4554404ad7_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:55,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/fd2abf095e1b4a24ba949b194d812f00, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:55,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/fd2abf095e1b4a24ba949b194d812f00 is 175, key is test_row_0/A:col10/1733884073988/Put/seqid=0 2024-12-11T02:27:55,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742331_1507 (size=31255) 2024-12-11T02:27:56,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:56,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
as already flushing 2024-12-11T02:27:56,202 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=287, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/fd2abf095e1b4a24ba949b194d812f00 2024-12-11T02:27:56,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/3c01dcaa949b4252896601a17fe4a627 is 50, key is test_row_0/B:col10/1733884073988/Put/seqid=0 2024-12-11T02:27:56,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742332_1508 (size=12301) 2024-12-11T02:27:56,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:56,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884136242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:56,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:56,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884136347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:56,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:56,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884136552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:56,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-11T02:27:56,614 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/3c01dcaa949b4252896601a17fe4a627 2024-12-11T02:27:56,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/9211df394d1a40bc912ffda73b33845d is 50, key is test_row_0/C:col10/1733884073988/Put/seqid=0 2024-12-11T02:27:56,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742333_1509 (size=12301) 2024-12-11T02:27:56,813 DEBUG [Thread-1912 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7819b9e2 to 127.0.0.1:63149 2024-12-11T02:27:56,813 DEBUG [Thread-1912 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:56,814 DEBUG [Thread-1910 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61ec0f48 to 127.0.0.1:63149 2024-12-11T02:27:56,814 DEBUG [Thread-1910 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:56,817 DEBUG [Thread-1908 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c907e21 to 127.0.0.1:63149 2024-12-11T02:27:56,817 DEBUG [Thread-1908 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:56,817 DEBUG [Thread-1916 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4cb9e50e to 127.0.0.1:63149 2024-12-11T02:27:56,817 DEBUG [Thread-1916 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:56,818 DEBUG [Thread-1914 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47679076 to 127.0.0.1:63149 2024-12-11T02:27:56,818 DEBUG [Thread-1914 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:56,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:27:56,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1733884136856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:27:57,033 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/9211df394d1a40bc912ffda73b33845d 2024-12-11T02:27:57,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/fd2abf095e1b4a24ba949b194d812f00 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/fd2abf095e1b4a24ba949b194d812f00 2024-12-11T02:27:57,040 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/fd2abf095e1b4a24ba949b194d812f00, entries=150, sequenceid=287, filesize=30.5 K 2024-12-11T02:27:57,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/3c01dcaa949b4252896601a17fe4a627 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/3c01dcaa949b4252896601a17fe4a627 2024-12-11T02:27:57,043 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/3c01dcaa949b4252896601a17fe4a627, entries=150, sequenceid=287, filesize=12.0 K 2024-12-11T02:27:57,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/9211df394d1a40bc912ffda73b33845d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9211df394d1a40bc912ffda73b33845d 2024-12-11T02:27:57,047 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9211df394d1a40bc912ffda73b33845d, entries=150, sequenceid=287, filesize=12.0 K 2024-12-11T02:27:57,048 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for a10f50dcc5dc0cf76420942b9469ad44 in 1666ms, sequenceid=287, compaction requested=false 2024-12-11T02:27:57,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:57,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
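Editor's note: the repeated RegionTooBusyException above is the region refusing new writes while its memstore is over the blocking limit (512 K in this test's configuration, presumably the flush size times hbase.hregion.memstore.block.multiplier) and the flush is still in flight. The stock client already retries such calls internally, so the explicit loop below is only an illustrative sketch of the same back-off idea; the retry count and sleep values are arbitrary.

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BackoffPutSketch {
  // Retries a put with exponential back-off when the region rejects it
  // because its memstore is over the blocking limit.
  static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long sleepMs = 100;
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e; // give up after a handful of attempts
          }
          Thread.sleep(sleepMs);
          sleepMs *= 2;
        }
      }
    }
  }

  static Put examplePut() {
    // Mirrors the row/column naming used by the test (test_row_0, family A, col10).
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }
}
```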
2024-12-11T02:27:57,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-11T02:27:57,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-11T02:27:57,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-11T02:27:57,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5850 sec 2024-12-11T02:27:57,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.5900 sec 2024-12-11T02:27:57,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:57,360 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-11T02:27:57,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:27:57,360 DEBUG [Thread-1899 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49e13594 to 127.0.0.1:63149 2024-12-11T02:27:57,360 DEBUG [Thread-1899 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:27:57,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:57,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:27:57,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:57,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:27:57,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:27:57,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e96fd5e5d3524f0e8fdaefed76c7860b_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884076237/Put/seqid=0 2024-12-11T02:27:57,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742334_1510 (size=12454) 2024-12-11T02:27:57,769 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:27:57,772 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e96fd5e5d3524f0e8fdaefed76c7860b_a10f50dcc5dc0cf76420942b9469ad44 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e96fd5e5d3524f0e8fdaefed76c7860b_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:27:57,773 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/833c69d0e7d54e33bdedcac6317edb31, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:57,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/833c69d0e7d54e33bdedcac6317edb31 is 175, key is test_row_0/A:col10/1733884076237/Put/seqid=0 2024-12-11T02:27:57,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742335_1511 (size=31255) 2024-12-11T02:27:58,177 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=316, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/833c69d0e7d54e33bdedcac6317edb31 2024-12-11T02:27:58,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/4b51db0020184fefa0ef14d92ff45403 is 50, key is test_row_0/B:col10/1733884076237/Put/seqid=0 2024-12-11T02:27:58,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742336_1512 (size=12301) 2024-12-11T02:27:58,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-11T02:27:58,567 INFO [Thread-1907 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-11T02:27:58,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/4b51db0020184fefa0ef14d92ff45403 2024-12-11T02:27:58,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/6c14d4c063a9454c9b0734bb72f96ca2 is 50, key is test_row_0/C:col10/1733884076237/Put/seqid=0 2024-12-11T02:27:58,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742337_1513 (size=12301) 2024-12-11T02:27:58,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=316 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/6c14d4c063a9454c9b0734bb72f96ca2 2024-12-11T02:27:59,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/833c69d0e7d54e33bdedcac6317edb31 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/833c69d0e7d54e33bdedcac6317edb31 2024-12-11T02:27:59,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/833c69d0e7d54e33bdedcac6317edb31, entries=150, sequenceid=316, filesize=30.5 K 2024-12-11T02:27:59,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/4b51db0020184fefa0ef14d92ff45403 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/4b51db0020184fefa0ef14d92ff45403 2024-12-11T02:27:59,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/4b51db0020184fefa0ef14d92ff45403, entries=150, sequenceid=316, filesize=12.0 K 2024-12-11T02:27:59,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/6c14d4c063a9454c9b0734bb72f96ca2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6c14d4c063a9454c9b0734bb72f96ca2 2024-12-11T02:27:59,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6c14d4c063a9454c9b0734bb72f96ca2, entries=150, sequenceid=316, filesize=12.0 K 2024-12-11T02:27:59,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=0 B/0 for a10f50dcc5dc0cf76420942b9469ad44 in 1652ms, sequenceid=316, compaction requested=true 2024-12-11T02:27:59,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:59,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:27:59,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:59,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
a10f50dcc5dc0cf76420942b9469ad44:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:27:59,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:59,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a10f50dcc5dc0cf76420942b9469ad44:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:27:59,011 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:59,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:59,011 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:59,012 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94379 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:59,012 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:59,012 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/A is initiating minor compaction (all files) 2024-12-11T02:27:59,012 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/B is initiating minor compaction (all files) 2024-12-11T02:27:59,012 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/A in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:59,012 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/B in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
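Editor's note: the "3 eligible, 16 blocking" figures in the SortedCompactionPolicy lines come from the store-file thresholds that decide when a flush is followed by a minor compaction and when further flushes are held off. A hedged configuration sketch: the property names below are the standard HBase ones but should be verified against the version in use, and the values shown are just the usual defaults.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Smallest and largest number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Once a store holds this many files, flushes are delayed until compaction
    // catches up -- the "16 blocking" reported above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("blocking at "
        + conf.getInt("hbase.hstore.blockingStoreFiles", -1) + " store files");
  }
}
```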
2024-12-11T02:27:59,012 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/b442e2fc3fe74d82810f16511ccf7a27, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/fd2abf095e1b4a24ba949b194d812f00, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/833c69d0e7d54e33bdedcac6317edb31] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=92.2 K 2024-12-11T02:27:59,012 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/aacbfdb4fb9d4ac992725049020db66a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/3c01dcaa949b4252896601a17fe4a627, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/4b51db0020184fefa0ef14d92ff45403] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=36.6 K 2024-12-11T02:27:59,012 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:59,012 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/b442e2fc3fe74d82810f16511ccf7a27, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/fd2abf095e1b4a24ba949b194d812f00, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/833c69d0e7d54e33bdedcac6317edb31] 2024-12-11T02:27:59,013 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting aacbfdb4fb9d4ac992725049020db66a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733884072767 2024-12-11T02:27:59,013 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting b442e2fc3fe74d82810f16511ccf7a27, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733884072767 2024-12-11T02:27:59,013 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c01dcaa949b4252896601a17fe4a627, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733884073988 2024-12-11T02:27:59,013 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd2abf095e1b4a24ba949b194d812f00, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733884073988 2024-12-11T02:27:59,013 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b51db0020184fefa0ef14d92ff45403, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733884076195 2024-12-11T02:27:59,013 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 833c69d0e7d54e33bdedcac6317edb31, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733884076195 2024-12-11T02:27:59,019 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:59,020 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#B#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:59,020 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/15880461402d4dfeb1a7cba0fbe8c6c8 is 50, key is test_row_0/B:col10/1733884076237/Put/seqid=0 2024-12-11T02:27:59,021 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211554498c6326f492c82cf3d97b95a2075_a10f50dcc5dc0cf76420942b9469ad44 store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:59,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742338_1514 (size=13017) 2024-12-11T02:27:59,024 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211554498c6326f492c82cf3d97b95a2075_a10f50dcc5dc0cf76420942b9469ad44, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:59,024 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211554498c6326f492c82cf3d97b95a2075_a10f50dcc5dc0cf76420942b9469ad44 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:27:59,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742339_1515 (size=4469) 2024-12-11T02:27:59,428 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/15880461402d4dfeb1a7cba0fbe8c6c8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/15880461402d4dfeb1a7cba0fbe8c6c8 2024-12-11T02:27:59,429 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#A#compaction#433 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:59,429 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/3e613a21f766423692ea9ca7ecb4f5b7 is 175, key is test_row_0/A:col10/1733884076237/Put/seqid=0 2024-12-11T02:27:59,431 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/B of a10f50dcc5dc0cf76420942b9469ad44 into 15880461402d4dfeb1a7cba0fbe8c6c8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
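Editor's note: family A is MOB-enabled, which is why its compactions run through DefaultMobStoreCompactor; in this pass every value is below the MOB threshold, so the freshly created MOB writer is aborted with "no MOB cells". A hedged sketch of how such a family is declared with the HBase 2.x descriptor builders; the threshold value is arbitrary and only for illustration.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class MobFamilySketch {
  static void createMobTable(Admin admin) throws Exception {
    // Cells in family A larger than the threshold go to separate MOB files;
    // smaller cells stay in ordinary store files, which is why the compaction
    // above ends up with zero MOB cells and aborts its MOB writer.
    admin.createTable(
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100 * 1024L) // 100 KB; arbitrary for illustration
                .build())
            .build());
  }
}
```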
2024-12-11T02:27:59,431 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:59,431 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/B, priority=13, startTime=1733884079011; duration=0sec 2024-12-11T02:27:59,432 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:27:59,432 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:B 2024-12-11T02:27:59,432 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:27:59,432 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:27:59,433 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): a10f50dcc5dc0cf76420942b9469ad44/C is initiating minor compaction (all files) 2024-12-11T02:27:59,433 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a10f50dcc5dc0cf76420942b9469ad44/C in TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:27:59,433 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80838f34036541f496148630a909ca74, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9211df394d1a40bc912ffda73b33845d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6c14d4c063a9454c9b0734bb72f96ca2] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp, totalSize=36.6 K 2024-12-11T02:27:59,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742340_1516 (size=31971) 2024-12-11T02:27:59,433 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 80838f34036541f496148630a909ca74, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733884072767 2024-12-11T02:27:59,433 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 9211df394d1a40bc912ffda73b33845d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733884073988 2024-12-11T02:27:59,434 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c14d4c063a9454c9b0734bb72f96ca2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=316, earliestPutTs=1733884076195 2024-12-11T02:27:59,439 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a10f50dcc5dc0cf76420942b9469ad44#C#compaction#434 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:27:59,440 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/a65fc90e8d9549c0b36a1c9ababfba3c is 50, key is test_row_0/C:col10/1733884076237/Put/seqid=0 2024-12-11T02:27:59,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742341_1517 (size=13017) 2024-12-11T02:27:59,837 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/3e613a21f766423692ea9ca7ecb4f5b7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/3e613a21f766423692ea9ca7ecb4f5b7 2024-12-11T02:27:59,841 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/A of a10f50dcc5dc0cf76420942b9469ad44 into 3e613a21f766423692ea9ca7ecb4f5b7(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:59,841 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:59,841 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/A, priority=13, startTime=1733884079011; duration=0sec 2024-12-11T02:27:59,841 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:59,841 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:A 2024-12-11T02:27:59,846 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/a65fc90e8d9549c0b36a1c9ababfba3c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/a65fc90e8d9549c0b36a1c9ababfba3c 2024-12-11T02:27:59,850 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a10f50dcc5dc0cf76420942b9469ad44/C of a10f50dcc5dc0cf76420942b9469ad44 into a65fc90e8d9549c0b36a1c9ababfba3c(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:27:59,850 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:27:59,850 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44., storeName=a10f50dcc5dc0cf76420942b9469ad44/C, priority=13, startTime=1733884079011; duration=0sec 2024-12-11T02:27:59,850 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:27:59,850 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a10f50dcc5dc0cf76420942b9469ad44:C 2024-12-11T02:28:02,932 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T02:28:04,099 DEBUG [Thread-1903 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3875c8c5 to 127.0.0.1:63149 2024-12-11T02:28:04,099 DEBUG [Thread-1903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:04,156 DEBUG [Thread-1905 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0801ba40 to 127.0.0.1:63149 2024-12-11T02:28:04,156 DEBUG [Thread-1905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:04,157 DEBUG [Thread-1901 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c54a0d3 to 127.0.0.1:63149 2024-12-11T02:28:04,157 DEBUG [Thread-1901 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:04,195 DEBUG [Thread-1897 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62f74604 to 127.0.0.1:63149 2024-12-11T02:28:04,195 DEBUG [Thread-1897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:04,195 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-11T02:28:04,195 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 27
2024-12-11T02:28:04,195 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 139
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 28
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2502
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7506 rows
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2509
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7527 rows
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2503
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7509 rows
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2512
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7536 rows
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2512
2024-12-11T02:28:04,196 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7536 rows
2024-12-11T02:28:04,196 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-11T02:28:04,196 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75b14fbd to 127.0.0.1:63149
2024-12-11T02:28:04,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T02:28:04,198 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-11T02:28:04,198 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-11T02:28:04,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-11T02:28:04,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-12-11T02:28:04,203 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884084203"}]},"ts":"1733884084203"}
2024-12-11T02:28:04,204 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-11T02:28:04,206 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-11T02:28:04,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-11T02:28:04,208 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, UNASSIGN}] 2024-12-11T02:28:04,209 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, UNASSIGN 2024-12-11T02:28:04,209 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=a10f50dcc5dc0cf76420942b9469ad44, regionState=CLOSING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:04,210 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T02:28:04,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; CloseRegionProcedure a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:28:04,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T02:28:04,361 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:04,362 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T02:28:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing a10f50dcc5dc0cf76420942b9469ad44, disabling compactions & flushes 2024-12-11T02:28:04,362 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:28:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 2024-12-11T02:28:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. after waiting 0 ms 2024-12-11T02:28:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:28:04,362 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing a10f50dcc5dc0cf76420942b9469ad44 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-11T02:28:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=A 2024-12-11T02:28:04,363 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:04,363 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=B 2024-12-11T02:28:04,363 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:04,363 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a10f50dcc5dc0cf76420942b9469ad44, store=C 2024-12-11T02:28:04,363 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:04,367 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110ac72e3f8a414a8ead0db57f8cfde95c_a10f50dcc5dc0cf76420942b9469ad44 is 50, key is test_row_0/A:col10/1733884084156/Put/seqid=0 2024-12-11T02:28:04,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742342_1518 (size=9914) 2024-12-11T02:28:04,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T02:28:04,773 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:04,777 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110ac72e3f8a414a8ead0db57f8cfde95c_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110ac72e3f8a414a8ead0db57f8cfde95c_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:04,777 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/3d977d1e24e946c785fb9b6bd297b749, store: [table=TestAcidGuarantees family=A region=a10f50dcc5dc0cf76420942b9469ad44] 2024-12-11T02:28:04,778 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/3d977d1e24e946c785fb9b6bd297b749 is 175, key is test_row_0/A:col10/1733884084156/Put/seqid=0 2024-12-11T02:28:04,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742343_1519 (size=22561) 2024-12-11T02:28:04,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T02:28:05,182 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=326, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/3d977d1e24e946c785fb9b6bd297b749 2024-12-11T02:28:05,188 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/cb6304f0dd2944eeb0ea37285639f50d is 50, key is test_row_0/B:col10/1733884084156/Put/seqid=0 2024-12-11T02:28:05,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742344_1520 (size=9857) 2024-12-11T02:28:05,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T02:28:05,592 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/cb6304f0dd2944eeb0ea37285639f50d 2024-12-11T02:28:05,598 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/f9e4cecb3b6c41b18e99d598ebfd7243 is 50, key is test_row_0/C:col10/1733884084156/Put/seqid=0 2024-12-11T02:28:05,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742345_1521 (size=9857) 2024-12-11T02:28:06,002 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=326 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/f9e4cecb3b6c41b18e99d598ebfd7243 2024-12-11T02:28:06,005 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/A/3d977d1e24e946c785fb9b6bd297b749 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/3d977d1e24e946c785fb9b6bd297b749 2024-12-11T02:28:06,008 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/3d977d1e24e946c785fb9b6bd297b749, entries=100, sequenceid=326, filesize=22.0 K 2024-12-11T02:28:06,009 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/B/cb6304f0dd2944eeb0ea37285639f50d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/cb6304f0dd2944eeb0ea37285639f50d 2024-12-11T02:28:06,012 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/cb6304f0dd2944eeb0ea37285639f50d, entries=100, sequenceid=326, filesize=9.6 K 2024-12-11T02:28:06,012 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/.tmp/C/f9e4cecb3b6c41b18e99d598ebfd7243 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/f9e4cecb3b6c41b18e99d598ebfd7243 2024-12-11T02:28:06,015 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/f9e4cecb3b6c41b18e99d598ebfd7243, entries=100, sequenceid=326, filesize=9.6 K 2024-12-11T02:28:06,016 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for a10f50dcc5dc0cf76420942b9469ad44 in 1654ms, sequenceid=326, compaction requested=false 2024-12-11T02:28:06,016 DEBUG [StoreCloser-TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/661d28466f2a45c8a722b529d39bf8c3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/af8b907e05d04016a7d8507c16b51a94, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/958ea41b992b489d95920fecd64a594f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/03fc790764fd4bc088914847e2d42686, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/c87b8bd9653f41d68a7ae797ed614b30, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/1b2b449c29d841c582267ac47fda0f5d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0a83fa560d44ab8a6a7c6e3243ea1c9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/14e7982660fa411cb48b3227144daa95, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/0bf0c1387d3d48d5bb63190f7b6eeb24, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/cf0e326c5f984a5fbcbdad51aa0e09ec, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/9357510f961643939bf726ac30feee55, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/4add79e785e24201bf38dde195e0168a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/dfbf356d2b7b479c813e316d4b4ad4a9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0689af15c714ee3b0bb6e602ac9a1df, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/6514ef1083584873a32638fccba204b1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/5a9bf579101c47a7a16b6ffc34258c81, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/339c6b707d6e471a96a76f416c2bc150, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/382c685c24f7468ea59c8161b6857d28, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/b442e2fc3fe74d82810f16511ccf7a27, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/fd2abf095e1b4a24ba949b194d812f00, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/833c69d0e7d54e33bdedcac6317edb31] to archive 2024-12-11T02:28:06,017 DEBUG [StoreCloser-TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T02:28:06,019 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/af8b907e05d04016a7d8507c16b51a94 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/af8b907e05d04016a7d8507c16b51a94 2024-12-11T02:28:06,020 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/958ea41b992b489d95920fecd64a594f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/958ea41b992b489d95920fecd64a594f 2024-12-11T02:28:06,020 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/c87b8bd9653f41d68a7ae797ed614b30 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/c87b8bd9653f41d68a7ae797ed614b30 2024-12-11T02:28:06,020 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/661d28466f2a45c8a722b529d39bf8c3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/661d28466f2a45c8a722b529d39bf8c3 2024-12-11T02:28:06,020 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/03fc790764fd4bc088914847e2d42686 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/03fc790764fd4bc088914847e2d42686 2024-12-11T02:28:06,020 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/1b2b449c29d841c582267ac47fda0f5d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/1b2b449c29d841c582267ac47fda0f5d 2024-12-11T02:28:06,021 DEBUG [HFileArchiver-16 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/14e7982660fa411cb48b3227144daa95 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/14e7982660fa411cb48b3227144daa95 2024-12-11T02:28:06,021 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/0bf0c1387d3d48d5bb63190f7b6eeb24 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/0bf0c1387d3d48d5bb63190f7b6eeb24 2024-12-11T02:28:06,021 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/9357510f961643939bf726ac30feee55 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/9357510f961643939bf726ac30feee55 2024-12-11T02:28:06,021 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0a83fa560d44ab8a6a7c6e3243ea1c9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0a83fa560d44ab8a6a7c6e3243ea1c9 2024-12-11T02:28:06,022 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/dfbf356d2b7b479c813e316d4b4ad4a9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/dfbf356d2b7b479c813e316d4b4ad4a9 2024-12-11T02:28:06,022 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/4add79e785e24201bf38dde195e0168a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/4add79e785e24201bf38dde195e0168a 2024-12-11T02:28:06,022 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/cf0e326c5f984a5fbcbdad51aa0e09ec to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/cf0e326c5f984a5fbcbdad51aa0e09ec 2024-12-11T02:28:06,022 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0689af15c714ee3b0bb6e602ac9a1df to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/d0689af15c714ee3b0bb6e602ac9a1df 2024-12-11T02:28:06,023 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/5a9bf579101c47a7a16b6ffc34258c81 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/5a9bf579101c47a7a16b6ffc34258c81 2024-12-11T02:28:06,023 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/382c685c24f7468ea59c8161b6857d28 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/382c685c24f7468ea59c8161b6857d28 2024-12-11T02:28:06,023 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/b442e2fc3fe74d82810f16511ccf7a27 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/b442e2fc3fe74d82810f16511ccf7a27 2024-12-11T02:28:06,023 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/6514ef1083584873a32638fccba204b1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/6514ef1083584873a32638fccba204b1 2024-12-11T02:28:06,023 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/833c69d0e7d54e33bdedcac6317edb31 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/833c69d0e7d54e33bdedcac6317edb31 2024-12-11T02:28:06,023 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/339c6b707d6e471a96a76f416c2bc150 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/339c6b707d6e471a96a76f416c2bc150 2024-12-11T02:28:06,023 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/fd2abf095e1b4a24ba949b194d812f00 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/fd2abf095e1b4a24ba949b194d812f00 2024-12-11T02:28:06,025 DEBUG [StoreCloser-TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/87be517c45d34a44a0ce7d4d162136dd, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/9d0462a832af478889b2468b15d28648, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36cb6c73d06f4f67a0574704ffcf5734, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fa8ea021398e4523941f41bbc07e5655, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/6a9e69339a7649d2894964e7b14d60b8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/b6b13be5078a4db784ed404fb0916bfc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ce601f7cfe8a442f928a604d12ac8198, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ab8fa9c5c9064f74a3812f96862af937, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fdd86e9f60784c10a5b44cd8921f395d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/58853a6e8d58486da0bdb10a05fa00a9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/e93b921732fd4fd18159701b59eafd39, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a17d215fc60f4980ac5726e781bd2143, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36a32ed7790d4fe89147b23ad2214042, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/79d372a590ba410a9200e5e6def14fb7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/d2a7d7460601410b957ba4ba68e8cc7f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a28040049e7b454e89c168736cd5869a, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/f41c70f247da4a62963fec5d688ebeeb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/aacbfdb4fb9d4ac992725049020db66a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/01e01165364e469998ac3413713fce8b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/3c01dcaa949b4252896601a17fe4a627, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/4b51db0020184fefa0ef14d92ff45403] to archive 2024-12-11T02:28:06,025 DEBUG [StoreCloser-TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T02:28:06,028 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/9d0462a832af478889b2468b15d28648 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/9d0462a832af478889b2468b15d28648 2024-12-11T02:28:06,028 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/b6b13be5078a4db784ed404fb0916bfc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/b6b13be5078a4db784ed404fb0916bfc 2024-12-11T02:28:06,028 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fa8ea021398e4523941f41bbc07e5655 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fa8ea021398e4523941f41bbc07e5655 2024-12-11T02:28:06,028 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ce601f7cfe8a442f928a604d12ac8198 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ce601f7cfe8a442f928a604d12ac8198 2024-12-11T02:28:06,028 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/87be517c45d34a44a0ce7d4d162136dd to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/87be517c45d34a44a0ce7d4d162136dd 
2024-12-11T02:28:06,028 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ab8fa9c5c9064f74a3812f96862af937 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/ab8fa9c5c9064f74a3812f96862af937 2024-12-11T02:28:06,028 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/6a9e69339a7649d2894964e7b14d60b8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/6a9e69339a7649d2894964e7b14d60b8 2024-12-11T02:28:06,028 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36cb6c73d06f4f67a0574704ffcf5734 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36cb6c73d06f4f67a0574704ffcf5734 2024-12-11T02:28:06,029 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fdd86e9f60784c10a5b44cd8921f395d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/fdd86e9f60784c10a5b44cd8921f395d 2024-12-11T02:28:06,029 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/e93b921732fd4fd18159701b59eafd39 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/e93b921732fd4fd18159701b59eafd39 2024-12-11T02:28:06,030 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a17d215fc60f4980ac5726e781bd2143 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a17d215fc60f4980ac5726e781bd2143 2024-12-11T02:28:06,030 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/58853a6e8d58486da0bdb10a05fa00a9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/58853a6e8d58486da0bdb10a05fa00a9 2024-12-11T02:28:06,030 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/79d372a590ba410a9200e5e6def14fb7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/79d372a590ba410a9200e5e6def14fb7 2024-12-11T02:28:06,030 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a28040049e7b454e89c168736cd5869a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/a28040049e7b454e89c168736cd5869a 2024-12-11T02:28:06,030 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/d2a7d7460601410b957ba4ba68e8cc7f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/d2a7d7460601410b957ba4ba68e8cc7f 2024-12-11T02:28:06,030 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36a32ed7790d4fe89147b23ad2214042 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/36a32ed7790d4fe89147b23ad2214042 2024-12-11T02:28:06,031 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/f41c70f247da4a62963fec5d688ebeeb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/f41c70f247da4a62963fec5d688ebeeb 2024-12-11T02:28:06,031 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/01e01165364e469998ac3413713fce8b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/01e01165364e469998ac3413713fce8b 2024-12-11T02:28:06,031 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/aacbfdb4fb9d4ac992725049020db66a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/aacbfdb4fb9d4ac992725049020db66a 2024-12-11T02:28:06,031 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/3c01dcaa949b4252896601a17fe4a627 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/3c01dcaa949b4252896601a17fe4a627 2024-12-11T02:28:06,031 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/4b51db0020184fefa0ef14d92ff45403 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/4b51db0020184fefa0ef14d92ff45403 2024-12-11T02:28:06,032 DEBUG [StoreCloser-TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9deea7af99ff456285eed88a682e7676, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d45af2a8f8a2489b8802e36228a9645c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d7eb109c4386455d86256473f4a1e815, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/bdcef57b1e8f4eefaa2e6dd0a0d33510, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/8a59f0f89f49448795b0b4288af602c8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/89f65450664f43a7acf3ef0217fcb1e7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ef39452c5bdd40ba82b15b5ffa5eb62d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80543e0aae7443f48bbe920fdb495f98, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/71a69afce7d94e19b696fecdfb5efc12, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/f83306550e9f475088970e06e2ff2d09, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ec8bf089cef54db0ae7ccc858fef6492, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d63f68bd16cb4954b1c3943136ab547a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/864fe20712374762a9abbe10afaafb3f, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/abb2fe4094c041478c735c0c10239365, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/72477d41e9b44f99ab5167bf8f2c1d2a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/eb085cd62fb04775ad47adb504f9010f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6613e9121365457ebb3074a9781787e6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80838f34036541f496148630a909ca74, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/10cf62ea6bf44183a57dfd5feec0a7fe, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9211df394d1a40bc912ffda73b33845d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6c14d4c063a9454c9b0734bb72f96ca2] to archive 2024-12-11T02:28:06,033 DEBUG [StoreCloser-TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T02:28:06,035 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/bdcef57b1e8f4eefaa2e6dd0a0d33510 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/bdcef57b1e8f4eefaa2e6dd0a0d33510 2024-12-11T02:28:06,035 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9deea7af99ff456285eed88a682e7676 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9deea7af99ff456285eed88a682e7676 2024-12-11T02:28:06,035 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d45af2a8f8a2489b8802e36228a9645c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d45af2a8f8a2489b8802e36228a9645c 2024-12-11T02:28:06,035 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/8a59f0f89f49448795b0b4288af602c8 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/8a59f0f89f49448795b0b4288af602c8 2024-12-11T02:28:06,035 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d7eb109c4386455d86256473f4a1e815 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d7eb109c4386455d86256473f4a1e815 2024-12-11T02:28:06,035 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80543e0aae7443f48bbe920fdb495f98 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80543e0aae7443f48bbe920fdb495f98 2024-12-11T02:28:06,035 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ef39452c5bdd40ba82b15b5ffa5eb62d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ef39452c5bdd40ba82b15b5ffa5eb62d 2024-12-11T02:28:06,035 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/89f65450664f43a7acf3ef0217fcb1e7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/89f65450664f43a7acf3ef0217fcb1e7 2024-12-11T02:28:06,037 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d63f68bd16cb4954b1c3943136ab547a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/d63f68bd16cb4954b1c3943136ab547a 2024-12-11T02:28:06,037 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/71a69afce7d94e19b696fecdfb5efc12 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/71a69afce7d94e19b696fecdfb5efc12 2024-12-11T02:28:06,037 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/abb2fe4094c041478c735c0c10239365 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/abb2fe4094c041478c735c0c10239365 2024-12-11T02:28:06,037 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/864fe20712374762a9abbe10afaafb3f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/864fe20712374762a9abbe10afaafb3f 2024-12-11T02:28:06,037 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ec8bf089cef54db0ae7ccc858fef6492 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/ec8bf089cef54db0ae7ccc858fef6492 2024-12-11T02:28:06,037 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/f83306550e9f475088970e06e2ff2d09 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/f83306550e9f475088970e06e2ff2d09 2024-12-11T02:28:06,038 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/eb085cd62fb04775ad47adb504f9010f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/eb085cd62fb04775ad47adb504f9010f 2024-12-11T02:28:06,038 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/72477d41e9b44f99ab5167bf8f2c1d2a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/72477d41e9b44f99ab5167bf8f2c1d2a 2024-12-11T02:28:06,038 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80838f34036541f496148630a909ca74 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/80838f34036541f496148630a909ca74 2024-12-11T02:28:06,038 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6613e9121365457ebb3074a9781787e6 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6613e9121365457ebb3074a9781787e6 2024-12-11T02:28:06,039 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/10cf62ea6bf44183a57dfd5feec0a7fe to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/10cf62ea6bf44183a57dfd5feec0a7fe 2024-12-11T02:28:06,039 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9211df394d1a40bc912ffda73b33845d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/9211df394d1a40bc912ffda73b33845d 2024-12-11T02:28:06,039 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6c14d4c063a9454c9b0734bb72f96ca2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/6c14d4c063a9454c9b0734bb72f96ca2 2024-12-11T02:28:06,046 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/recovered.edits/329.seqid, newMaxSeqId=329, maxSeqId=4 2024-12-11T02:28:06,046 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44. 
2024-12-11T02:28:06,046 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for a10f50dcc5dc0cf76420942b9469ad44: 2024-12-11T02:28:06,048 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,048 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=a10f50dcc5dc0cf76420942b9469ad44, regionState=CLOSED 2024-12-11T02:28:06,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-11T02:28:06,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseRegionProcedure a10f50dcc5dc0cf76420942b9469ad44, server=5f57a24c5131,40311,1733883964600 in 1.8390 sec 2024-12-11T02:28:06,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-12-11T02:28:06,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a10f50dcc5dc0cf76420942b9469ad44, UNASSIGN in 1.8420 sec 2024-12-11T02:28:06,052 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-11T02:28:06,052 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8440 sec 2024-12-11T02:28:06,053 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884086053"}]},"ts":"1733884086053"} 2024-12-11T02:28:06,054 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T02:28:06,056 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T02:28:06,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8580 sec 2024-12-11T02:28:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T02:28:06,306 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-11T02:28:06,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T02:28:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:06,308 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-11T02:28:06,309 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=120, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:06,310 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,311 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/recovered.edits] 2024-12-11T02:28:06,314 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/3d977d1e24e946c785fb9b6bd297b749 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/3d977d1e24e946c785fb9b6bd297b749 2024-12-11T02:28:06,314 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/3e613a21f766423692ea9ca7ecb4f5b7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/A/3e613a21f766423692ea9ca7ecb4f5b7 2024-12-11T02:28:06,316 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/cb6304f0dd2944eeb0ea37285639f50d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/cb6304f0dd2944eeb0ea37285639f50d 2024-12-11T02:28:06,316 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/15880461402d4dfeb1a7cba0fbe8c6c8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/B/15880461402d4dfeb1a7cba0fbe8c6c8 2024-12-11T02:28:06,318 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/f9e4cecb3b6c41b18e99d598ebfd7243 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/f9e4cecb3b6c41b18e99d598ebfd7243 
2024-12-11T02:28:06,318 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/a65fc90e8d9549c0b36a1c9ababfba3c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/C/a65fc90e8d9549c0b36a1c9ababfba3c 2024-12-11T02:28:06,320 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/recovered.edits/329.seqid to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44/recovered.edits/329.seqid 2024-12-11T02:28:06,321 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,321 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T02:28:06,321 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T02:28:06,322 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-11T02:28:06,327 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110ac72e3f8a414a8ead0db57f8cfde95c_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110ac72e3f8a414a8ead0db57f8cfde95c_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,327 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111385f0e06884436394a028ee0253d42e_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111385f0e06884436394a028ee0253d42e_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,327 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121173735a99b2a84b6bb459efd3b1ba7948_a10f50dcc5dc0cf76420942b9469ad44 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121173735a99b2a84b6bb459efd3b1ba7948_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,327 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211241cb0dce1c44e66a3be4ebae44f7682_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211241cb0dce1c44e66a3be4ebae44f7682_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,328 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412113f453b31c0734ead8bf5a17665378111_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412113f453b31c0734ead8bf5a17665378111_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,328 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121178e0b7ec966a417992cf46797dfd428b_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121178e0b7ec966a417992cf46797dfd428b_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,328 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211862a58a6b6e541efb8e527a80e3dddf9_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211862a58a6b6e541efb8e527a80e3dddf9_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,328 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117fe879c4574c4af9ae9313efd7b6f442_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117fe879c4574c4af9ae9313efd7b6f442_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,329 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118ab82155bf7d45a1989a66a0c17583b9_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118ab82155bf7d45a1989a66a0c17583b9_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,330 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118ac64c455b9040c7bcbd23cef268b6f7_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118ac64c455b9040c7bcbd23cef268b6f7_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,330 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c1b8162c0b0f4fe2ab4e5ad17fb09285_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c1b8162c0b0f4fe2ab4e5ad17fb09285_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,330 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c7b87b3ecb3e415380dbe77ba356e789_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c7b87b3ecb3e415380dbe77ba356e789_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,330 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211a2fe238b184744cfbcef2c2c1ece092a_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211a2fe238b184744cfbcef2c2c1ece092a_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,330 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211d1818a636ee643959c7c48e532d35fd0_a10f50dcc5dc0cf76420942b9469ad44 to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211d1818a636ee643959c7c48e532d35fd0_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,330 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c9378334d90a48a19b337c4554404ad7_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c9378334d90a48a19b337c4554404ad7_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,330 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118ad90f53c2224961b1f08d0b86964f73_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118ad90f53c2224961b1f08d0b86964f73_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,331 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e96fd5e5d3524f0e8fdaefed76c7860b_a10f50dcc5dc0cf76420942b9469ad44 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e96fd5e5d3524f0e8fdaefed76c7860b_a10f50dcc5dc0cf76420942b9469ad44 2024-12-11T02:28:06,331 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T02:28:06,333 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=120, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:06,334 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T02:28:06,336 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T02:28:06,337 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=120, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:06,337 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-11T02:28:06,337 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733884086337"}]},"ts":"9223372036854775807"} 2024-12-11T02:28:06,339 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T02:28:06,339 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a10f50dcc5dc0cf76420942b9469ad44, NAME => 'TestAcidGuarantees,,1733884053735.a10f50dcc5dc0cf76420942b9469ad44.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T02:28:06,339 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-11T02:28:06,339 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733884086339"}]},"ts":"9223372036854775807"} 2024-12-11T02:28:06,342 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T02:28:06,344 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=120, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:06,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 38 msec 2024-12-11T02:28:06,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-11T02:28:06,410 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-11T02:28:06,420 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=243 (was 245), OpenFileDescriptor=453 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=375 (was 386), ProcessCount=11 (was 11), AvailableMemoryMB=4296 (was 4314) 2024-12-11T02:28:06,429 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=243, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=375, ProcessCount=11, AvailableMemoryMB=4296 2024-12-11T02:28:06,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-11T02:28:06,431 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:28:06,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:06,432 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T02:28:06,433 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:06,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 121 2024-12-11T02:28:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-11T02:28:06,433 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T02:28:06,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742346_1522 (size=963) 2024-12-11T02:28:06,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-11T02:28:06,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-11T02:28:06,840 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 2024-12-11T02:28:06,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742347_1523 (size=53) 2024-12-11T02:28:07,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-11T02:28:07,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:28:07,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 513ab21b2f5fe75f43e6defd51fe8517, disabling compactions & flushes 2024-12-11T02:28:07,250 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:07,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:07,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. after waiting 0 ms 2024-12-11T02:28:07,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:07,250 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:07,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:07,251 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T02:28:07,251 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733884087251"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733884087251"}]},"ts":"1733884087251"} 2024-12-11T02:28:07,252 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T02:28:07,253 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T02:28:07,253 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884087253"}]},"ts":"1733884087253"} 2024-12-11T02:28:07,254 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T02:28:07,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=513ab21b2f5fe75f43e6defd51fe8517, ASSIGN}] 2024-12-11T02:28:07,258 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=513ab21b2f5fe75f43e6defd51fe8517, ASSIGN 2024-12-11T02:28:07,258 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=513ab21b2f5fe75f43e6defd51fe8517, ASSIGN; state=OFFLINE, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=false 2024-12-11T02:28:07,409 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=513ab21b2f5fe75f43e6defd51fe8517, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:07,410 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; OpenRegionProcedure 513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:28:07,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-11T02:28:07,562 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:07,564 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:07,564 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7285): Opening region: {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:28:07,565 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:07,565 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:28:07,565 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7327): checking encryption for 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:07,565 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7330): checking classloading for 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:07,566 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:07,567 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:28:07,568 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 513ab21b2f5fe75f43e6defd51fe8517 columnFamilyName A 2024-12-11T02:28:07,568 DEBUG [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:07,568 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] regionserver.HStore(327): Store=513ab21b2f5fe75f43e6defd51fe8517/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:28:07,568 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:07,569 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:28:07,570 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 513ab21b2f5fe75f43e6defd51fe8517 columnFamilyName B 2024-12-11T02:28:07,570 DEBUG [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:07,570 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] regionserver.HStore(327): Store=513ab21b2f5fe75f43e6defd51fe8517/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:28:07,570 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:07,571 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:28:07,572 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 513ab21b2f5fe75f43e6defd51fe8517 columnFamilyName C 2024-12-11T02:28:07,572 DEBUG [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:07,572 INFO [StoreOpener-513ab21b2f5fe75f43e6defd51fe8517-1 {}] regionserver.HStore(327): Store=513ab21b2f5fe75f43e6defd51fe8517/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:28:07,572 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:07,573 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:07,573 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:07,574 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:28:07,576 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1085): writing seq id for 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:07,577 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T02:28:07,578 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1102): Opened 513ab21b2f5fe75f43e6defd51fe8517; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63189661, jitterRate=-0.058400675654411316}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:28:07,579 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1001): Region open journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:07,579 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., pid=123, masterSystemTime=1733884087561 2024-12-11T02:28:07,581 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:07,581 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:07,581 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=513ab21b2f5fe75f43e6defd51fe8517, regionState=OPEN, openSeqNum=2, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:07,583 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-11T02:28:07,583 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; OpenRegionProcedure 513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 in 172 msec 2024-12-11T02:28:07,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-12-11T02:28:07,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=513ab21b2f5fe75f43e6defd51fe8517, ASSIGN in 326 msec 2024-12-11T02:28:07,585 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T02:28:07,585 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884087585"}]},"ts":"1733884087585"} 2024-12-11T02:28:07,586 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T02:28:07,588 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T02:28:07,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1570 sec 2024-12-11T02:28:08,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-11T02:28:08,537 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 121 completed 2024-12-11T02:28:08,538 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-12-11T02:28:08,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,543 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,544 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56766, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,545 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T02:28:08,546 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59842, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T02:28:08,547 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-12-11T02:28:08,551 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,551 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-12-11T02:28:08,554 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,555 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bc486e1 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11193a0c 2024-12-11T02:28:08,557 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d672ed2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,557 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2070263a to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7861b162 2024-12-11T02:28:08,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cf40102, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,561 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6050584c to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@154f0f85 2024-12-11T02:28:08,563 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@496fe03f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,564 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-12-11T02:28:08,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,569 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-12-11T02:28:08,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,573 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79d49886 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@73d92042 2024-12-11T02:28:08,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c692575, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,577 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x635b1751 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@593af048 2024-12-11T02:28:08,581 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cbd2497, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,582 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2cbfd84f to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2209c520 2024-12-11T02:28:08,586 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5765d46a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:08,591 DEBUG [hconnection-0x6b1cf8f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,591 DEBUG [hconnection-0xfc4a02c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,593 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56780, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,593 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,594 DEBUG [hconnection-0x1fc08d72-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,595 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56796, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:08,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:28:08,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:08,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:08,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:08,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:08,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:08,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:08,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:08,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-11T02:28:08,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T02:28:08,603 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:08,603 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:08,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:08,612 DEBUG [hconnection-0x38871bf3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,613 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884148616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884148616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884148616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,620 DEBUG [hconnection-0x735d49b1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,622 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56826, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884148623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,631 DEBUG [hconnection-0x47b10fb6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,631 DEBUG [hconnection-0x7d2d3b96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,632 DEBUG [hconnection-0x3cd05064-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,632 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,632 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56846, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,632 DEBUG [hconnection-0xc65765f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/f38796b7f56043d28564960f362ec3dd is 50, key is test_row_0/A:col10/1733884088597/Put/seqid=0 2024-12-11T02:28:08,633 DEBUG [hconnection-0x426b14fa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:08,633 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,633 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,633 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56858, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:08,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884148635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742348_1524 (size=12001) 2024-12-11T02:28:08,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/f38796b7f56043d28564960f362ec3dd 2024-12-11T02:28:08,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0d2e6a4886634cc1bbb5317fdef00239 is 50, key is test_row_0/B:col10/1733884088597/Put/seqid=0 2024-12-11T02:28:08,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742349_1525 (size=12001) 2024-12-11T02:28:08,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T02:28:08,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884148717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884148717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884148717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884148724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884148736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-11T02:28:08,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:08,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:08,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:08,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:08,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:08,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T02:28:08,908 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-11T02:28:08,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:08,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:08,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:08,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:08,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:08,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:08,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884148919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884148920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884148920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884148928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:08,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:08,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884148941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,061 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-11T02:28:09,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:09,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:09,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:09,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:09,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:09,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:09,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0d2e6a4886634cc1bbb5317fdef00239 2024-12-11T02:28:09,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/973d81878d624dc5b14a367879e87759 is 50, key is test_row_0/C:col10/1733884088597/Put/seqid=0 2024-12-11T02:28:09,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742350_1526 (size=12001) 2024-12-11T02:28:09,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/973d81878d624dc5b14a367879e87759 2024-12-11T02:28:09,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/f38796b7f56043d28564960f362ec3dd as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f38796b7f56043d28564960f362ec3dd 2024-12-11T02:28:09,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f38796b7f56043d28564960f362ec3dd, entries=150, sequenceid=13, filesize=11.7 K 2024-12-11T02:28:09,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0d2e6a4886634cc1bbb5317fdef00239 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0d2e6a4886634cc1bbb5317fdef00239 2024-12-11T02:28:09,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0d2e6a4886634cc1bbb5317fdef00239, entries=150, sequenceid=13, filesize=11.7 K 2024-12-11T02:28:09,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/973d81878d624dc5b14a367879e87759 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/973d81878d624dc5b14a367879e87759 2024-12-11T02:28:09,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/973d81878d624dc5b14a367879e87759, entries=150, sequenceid=13, filesize=11.7 K 2024-12-11T02:28:09,117 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 513ab21b2f5fe75f43e6defd51fe8517 in 516ms, sequenceid=13, compaction requested=false 2024-12-11T02:28:09,117 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-11T02:28:09,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:09,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T02:28:09,214 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-11T02:28:09,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:09,214 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:28:09,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:09,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:09,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:09,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:09,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:09,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:09,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/cf065a994b0a4fe48782b57705b2ca90 is 50, key is test_row_0/A:col10/1733884088615/Put/seqid=0 2024-12-11T02:28:09,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742351_1527 (size=12001) 2024-12-11T02:28:09,223 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/cf065a994b0a4fe48782b57705b2ca90 2024-12-11T02:28:09,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:09,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
as already flushing 2024-12-11T02:28:09,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/fe8b3acd724b4c01858c5bfb04ce8ee7 is 50, key is test_row_0/B:col10/1733884088615/Put/seqid=0 2024-12-11T02:28:09,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742352_1528 (size=12001) 2024-12-11T02:28:09,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884149236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884149237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884149238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884149238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884149246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884149342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884149343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884149343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884149344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884149546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884149546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884149547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884149548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,635 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/fe8b3acd724b4c01858c5bfb04ce8ee7 2024-12-11T02:28:09,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/e0d7b064419a4bf6bd2a6f149a360c0d is 50, key is test_row_0/C:col10/1733884088615/Put/seqid=0 2024-12-11T02:28:09,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742353_1529 (size=12001) 2024-12-11T02:28:09,646 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/e0d7b064419a4bf6bd2a6f149a360c0d 2024-12-11T02:28:09,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/cf065a994b0a4fe48782b57705b2ca90 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/cf065a994b0a4fe48782b57705b2ca90 2024-12-11T02:28:09,660 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/cf065a994b0a4fe48782b57705b2ca90, entries=150, sequenceid=38, filesize=11.7 K 2024-12-11T02:28:09,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/fe8b3acd724b4c01858c5bfb04ce8ee7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fe8b3acd724b4c01858c5bfb04ce8ee7 2024-12-11T02:28:09,666 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fe8b3acd724b4c01858c5bfb04ce8ee7, entries=150, sequenceid=38, filesize=11.7 K 2024-12-11T02:28:09,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/e0d7b064419a4bf6bd2a6f149a360c0d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/e0d7b064419a4bf6bd2a6f149a360c0d 2024-12-11T02:28:09,670 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/e0d7b064419a4bf6bd2a6f149a360c0d, entries=150, sequenceid=38, filesize=11.7 K 2024-12-11T02:28:09,670 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 513ab21b2f5fe75f43e6defd51fe8517 in 456ms, sequenceid=38, compaction requested=false 2024-12-11T02:28:09,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:09,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:09,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-11T02:28:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-11T02:28:09,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-11T02:28:09,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0670 sec 2024-12-11T02:28:09,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.0730 sec 2024-12-11T02:28:09,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T02:28:09,706 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-11T02:28:09,707 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:09,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-11T02:28:09,709 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:09,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T02:28:09,709 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:09,709 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:09,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:09,754 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:28:09,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:09,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:09,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:09,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-11T02:28:09,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:09,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:09,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/9c048ed55b904f6692b3bdeeb21d0722 is 50, key is test_row_1/A:col10/1733884089753/Put/seqid=0 2024-12-11T02:28:09,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742354_1530 (size=11997) 2024-12-11T02:28:09,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T02:28:09,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884149848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884149850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884149852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884149853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884149853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,861 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:09,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T02:28:09,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:09,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:09,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:09,862 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:09,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:09,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:09,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:09,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884149953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T02:28:10,014 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T02:28:10,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:10,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:10,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:10,014 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:10,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884150156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/9c048ed55b904f6692b3bdeeb21d0722 2024-12-11T02:28:10,166 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T02:28:10,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:10,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:10,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:10,167 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/03e98551e8cd40eea97ad14c122f0269 is 50, key is test_row_1/B:col10/1733884089753/Put/seqid=0 2024-12-11T02:28:10,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742355_1531 (size=9657) 2024-12-11T02:28:10,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T02:28:10,320 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T02:28:10,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:10,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:10,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:10,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:10,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884150353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:10,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884150357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:10,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884150360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:10,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884150363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884150461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,473 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T02:28:10,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:10,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:10,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:10,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
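The RegionTooBusyException warnings above are the region server rejecting writes while the region's memstore is over its 512.0 K blocking limit; callers are expected to back off and retry until the in-flight flush frees space. Purely as an illustrative sketch (not part of this log and not the TestAcidGuarantees harness itself; the class name, backoff values, and the broad IOException catch are assumptions — depending on client retry settings the failure may surface wrapped in a RetriesExhaustedException rather than as a bare RegionTooBusyException), a writer against the same table, row, and column family seen in the log could retry like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {              // hypothetical class name, for illustration only
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row, family "A", and qualifier "col10" mirror the keys visible in the log above.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                   // assumed starting backoff
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);                     // may be rejected while the memstore is over its limit
              break;
            } catch (java.io.IOException e) {     // e.g. RegionTooBusyException, possibly wrapped
              Thread.sleep(backoffMs);            // back off and retry
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }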
2024-12-11T02:28:10,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:10,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/03e98551e8cd40eea97ad14c122f0269 2024-12-11T02:28:10,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/de77ef1042374c89b32ed77e54af09f6 is 50, key is test_row_1/C:col10/1733884089753/Put/seqid=0 2024-12-11T02:28:10,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742356_1532 (size=9657) 2024-12-11T02:28:10,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/de77ef1042374c89b32ed77e54af09f6 2024-12-11T02:28:10,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/9c048ed55b904f6692b3bdeeb21d0722 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/9c048ed55b904f6692b3bdeeb21d0722 2024-12-11T02:28:10,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/9c048ed55b904f6692b3bdeeb21d0722, entries=150, sequenceid=50, filesize=11.7 K 2024-12-11T02:28:10,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/03e98551e8cd40eea97ad14c122f0269 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/03e98551e8cd40eea97ad14c122f0269 2024-12-11T02:28:10,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/03e98551e8cd40eea97ad14c122f0269, entries=100, sequenceid=50, filesize=9.4 K 2024-12-11T02:28:10,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/de77ef1042374c89b32ed77e54af09f6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/de77ef1042374c89b32ed77e54af09f6 
2024-12-11T02:28:10,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/de77ef1042374c89b32ed77e54af09f6, entries=100, sequenceid=50, filesize=9.4 K 2024-12-11T02:28:10,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 513ab21b2f5fe75f43e6defd51fe8517 in 867ms, sequenceid=50, compaction requested=true 2024-12-11T02:28:10,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:10,621 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:10,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:10,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:10,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:10,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:10,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:10,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:10,622 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:10,625 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:10,625 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/A is initiating minor compaction (all files) 2024-12-11T02:28:10,625 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/A in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:10,625 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f38796b7f56043d28564960f362ec3dd, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/cf065a994b0a4fe48782b57705b2ca90, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/9c048ed55b904f6692b3bdeeb21d0722] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=35.2 K 2024-12-11T02:28:10,626 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:10,626 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting f38796b7f56043d28564960f362ec3dd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733884088597 2024-12-11T02:28:10,626 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/B is initiating minor compaction (all files) 2024-12-11T02:28:10,626 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/B in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:10,626 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0d2e6a4886634cc1bbb5317fdef00239, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fe8b3acd724b4c01858c5bfb04ce8ee7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/03e98551e8cd40eea97ad14c122f0269] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=32.9 K 2024-12-11T02:28:10,626 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,627 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d2e6a4886634cc1bbb5317fdef00239, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733884088597 2024-12-11T02:28:10,627 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf065a994b0a4fe48782b57705b2ca90, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733884088613 2024-12-11T02:28:10,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T02:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:10,627 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:10,628 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting fe8b3acd724b4c01858c5bfb04ce8ee7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733884088613 2024-12-11T02:28:10,628 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c048ed55b904f6692b3bdeeb21d0722, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884089236 2024-12-11T02:28:10,628 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 03e98551e8cd40eea97ad14c122f0269, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884089237 2024-12-11T02:28:10,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/793739044bd74de5bb74d114595baff9 is 50, key is test_row_0/A:col10/1733884089818/Put/seqid=0 2024-12-11T02:28:10,642 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#A#compaction#448 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:10,643 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/7dfe29bed2df437783b4149eff781823 is 50, key is test_row_0/A:col10/1733884088615/Put/seqid=0 2024-12-11T02:28:10,650 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#B#compaction#449 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:10,651 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/bdc69716e44a41cda318ac3005a83280 is 50, key is test_row_0/B:col10/1733884088615/Put/seqid=0 2024-12-11T02:28:10,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742357_1533 (size=12001) 2024-12-11T02:28:10,667 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/793739044bd74de5bb74d114595baff9 2024-12-11T02:28:10,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742358_1534 (size=12104) 2024-12-11T02:28:10,687 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/7dfe29bed2df437783b4149eff781823 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/7dfe29bed2df437783b4149eff781823 2024-12-11T02:28:10,693 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/A of 513ab21b2f5fe75f43e6defd51fe8517 into 7dfe29bed2df437783b4149eff781823(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:10,693 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:10,693 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/A, priority=13, startTime=1733884090621; duration=0sec 2024-12-11T02:28:10,693 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:10,693 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:A 2024-12-11T02:28:10,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742359_1535 (size=12104) 2024-12-11T02:28:10,693 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:10,695 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:10,695 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/C is initiating minor compaction (all files) 2024-12-11T02:28:10,695 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/C in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:10,695 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/973d81878d624dc5b14a367879e87759, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/e0d7b064419a4bf6bd2a6f149a360c0d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/de77ef1042374c89b32ed77e54af09f6] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=32.9 K 2024-12-11T02:28:10,696 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 973d81878d624dc5b14a367879e87759, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733884088597 2024-12-11T02:28:10,697 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0d7b064419a4bf6bd2a6f149a360c0d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733884088613 2024-12-11T02:28:10,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/cb67d1ca61754cd6a1340dcbc9fc78fb is 50, key is test_row_0/B:col10/1733884089818/Put/seqid=0 2024-12-11T02:28:10,699 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting de77ef1042374c89b32ed77e54af09f6, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884089237 2024-12-11T02:28:10,700 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/bdc69716e44a41cda318ac3005a83280 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bdc69716e44a41cda318ac3005a83280 2024-12-11T02:28:10,708 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/B of 513ab21b2f5fe75f43e6defd51fe8517 into bdc69716e44a41cda318ac3005a83280(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
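The compaction entries above show the minor compactions that the flush itself triggered: for each of the A, B, and C stores, three eligible HFiles were selected and merged into a single ~11.8 K file. As a hedged sketch only (the class name and polling interval are assumptions; the Admin calls are the standard HBase 2.x client API, which appears to match the 2.7.0-SNAPSHOT version strings in this log), a compaction can also be requested and observed explicitly from a client:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompactionExample {       // hypothetical class name, for illustration only
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees"); // table name taken from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                   // request a (minor) compaction of the table's regions
          CompactionState state;
          do {
            Thread.sleep(500);                    // assumed polling interval
            state = admin.getCompactionState(table); // NONE, MINOR, MAJOR, or MAJOR_AND_MINOR
          } while (state != CompactionState.NONE);
        }
      }
    }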
2024-12-11T02:28:10,708 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:10,708 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/B, priority=13, startTime=1733884090622; duration=0sec 2024-12-11T02:28:10,708 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:10,708 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:B 2024-12-11T02:28:10,710 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#C#compaction#451 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:10,711 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/d895c101ea1d44e0b691d15e99cc382b is 50, key is test_row_0/C:col10/1733884088615/Put/seqid=0 2024-12-11T02:28:10,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742360_1536 (size=12001) 2024-12-11T02:28:10,718 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/cb67d1ca61754cd6a1340dcbc9fc78fb 2024-12-11T02:28:10,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742361_1537 (size=12104) 2024-12-11T02:28:10,733 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/d895c101ea1d44e0b691d15e99cc382b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/d895c101ea1d44e0b691d15e99cc382b 2024-12-11T02:28:10,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/22f7a44098ae4102bbddbefffe246328 is 50, key is test_row_0/C:col10/1733884089818/Put/seqid=0 2024-12-11T02:28:10,737 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/C of 513ab21b2f5fe75f43e6defd51fe8517 into 
d895c101ea1d44e0b691d15e99cc382b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:10,737 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:10,737 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/C, priority=13, startTime=1733884090622; duration=0sec 2024-12-11T02:28:10,738 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:10,738 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:C 2024-12-11T02:28:10,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742362_1538 (size=12001) 2024-12-11T02:28:10,767 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/22f7a44098ae4102bbddbefffe246328 2024-12-11T02:28:10,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/793739044bd74de5bb74d114595baff9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/793739044bd74de5bb74d114595baff9 2024-12-11T02:28:10,781 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/793739044bd74de5bb74d114595baff9, entries=150, sequenceid=74, filesize=11.7 K 2024-12-11T02:28:10,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/cb67d1ca61754cd6a1340dcbc9fc78fb as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/cb67d1ca61754cd6a1340dcbc9fc78fb 2024-12-11T02:28:10,787 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/cb67d1ca61754cd6a1340dcbc9fc78fb, entries=150, sequenceid=74, filesize=11.7 K 2024-12-11T02:28:10,789 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/22f7a44098ae4102bbddbefffe246328 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/22f7a44098ae4102bbddbefffe246328 2024-12-11T02:28:10,793 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/22f7a44098ae4102bbddbefffe246328, entries=150, sequenceid=74, filesize=11.7 K 2024-12-11T02:28:10,794 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 513ab21b2f5fe75f43e6defd51fe8517 in 167ms, sequenceid=74, compaction requested=false 2024-12-11T02:28:10,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:10,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:10,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-11T02:28:10,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-11T02:28:10,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-11T02:28:10,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0860 sec 2024-12-11T02:28:10,798 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.0900 sec 2024-12-11T02:28:10,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T02:28:10,812 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-11T02:28:10,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:10,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-11T02:28:10,815 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:10,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T02:28:10,816 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:10,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:10,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T02:28:10,967 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:10,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T02:28:10,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:10,969 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-11T02:28:10,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:10,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:10,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:10,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:10,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:10,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:10,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/328d649e82d74611884e9f54cefa69f4 is 50, key is test_row_0/A:col10/1733884090966/Put/seqid=0 
2024-12-11T02:28:10,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742363_1539 (size=7315) 2024-12-11T02:28:10,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:10,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:11,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:11,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884151089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T02:28:11,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884151194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:11,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884151363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:11,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884151366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:11,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884151366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:11,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884151375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:11,394 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/328d649e82d74611884e9f54cefa69f4 2024-12-11T02:28:11,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884151401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:11,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/7f600d754e06465189af1c93246eee75 is 50, key is test_row_0/B:col10/1733884090966/Put/seqid=0 2024-12-11T02:28:11,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742364_1540 (size=7315) 2024-12-11T02:28:11,408 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/7f600d754e06465189af1c93246eee75 2024-12-11T02:28:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T02:28:11,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/2e57ea4525344af2bda339360d149f8a is 50, key is test_row_0/C:col10/1733884090966/Put/seqid=0 2024-12-11T02:28:11,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742365_1541 (size=7315) 2024-12-11T02:28:11,570 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T02:28:11,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:11,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884151705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:11,831 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/2e57ea4525344af2bda339360d149f8a 2024-12-11T02:28:11,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/328d649e82d74611884e9f54cefa69f4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/328d649e82d74611884e9f54cefa69f4 2024-12-11T02:28:11,839 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/328d649e82d74611884e9f54cefa69f4, entries=50, sequenceid=81, filesize=7.1 K 2024-12-11T02:28:11,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/7f600d754e06465189af1c93246eee75 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/7f600d754e06465189af1c93246eee75 2024-12-11T02:28:11,846 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/7f600d754e06465189af1c93246eee75, entries=50, sequenceid=81, filesize=7.1 K 2024-12-11T02:28:11,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/2e57ea4525344af2bda339360d149f8a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2e57ea4525344af2bda339360d149f8a 2024-12-11T02:28:11,850 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2e57ea4525344af2bda339360d149f8a, entries=50, sequenceid=81, filesize=7.1 K 2024-12-11T02:28:11,850 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=194.56 KB/199230 for 513ab21b2f5fe75f43e6defd51fe8517 in 882ms, sequenceid=81, compaction requested=true 2024-12-11T02:28:11,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:11,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:11,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-11T02:28:11,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-11T02:28:11,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-11T02:28:11,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0360 sec 2024-12-11T02:28:11,855 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.0420 sec 2024-12-11T02:28:11,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T02:28:11,919 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-11T02:28:11,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:11,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-11T02:28:11,923 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:11,924 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:11,924 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:11,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T02:28:12,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T02:28:12,075 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:12,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-11T02:28:12,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:12,076 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=194.56 KB heapSize=510.52 KB 2024-12-11T02:28:12,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:12,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:12,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:12,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:12,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:12,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:12,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/f14312bb11774fafb0060fe0ae308f64 is 50, key is test_row_0/A:col10/1733884091059/Put/seqid=0 2024-12-11T02:28:12,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742366_1542 (size=12001) 2024-12-11T02:28:12,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:12,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:12,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:12,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884152217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:12,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T02:28:12,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:12,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884152321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:12,487 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/f14312bb11774fafb0060fe0ae308f64 2024-12-11T02:28:12,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/fbbdb3d7d0cd4be1bd4e58376c1e76f8 is 50, key is test_row_0/B:col10/1733884091059/Put/seqid=0 2024-12-11T02:28:12,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742367_1543 (size=12001) 2024-12-11T02:28:12,519 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/fbbdb3d7d0cd4be1bd4e58376c1e76f8 2024-12-11T02:28:12,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T02:28:12,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/80dafb48879b4348abe9b4223814ffc1 is 50, key is test_row_0/C:col10/1733884091059/Put/seqid=0 2024-12-11T02:28:12,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:12,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884152524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:12,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742368_1544 (size=12001) 2024-12-11T02:28:12,549 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/80dafb48879b4348abe9b4223814ffc1 2024-12-11T02:28:12,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/f14312bb11774fafb0060fe0ae308f64 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f14312bb11774fafb0060fe0ae308f64 2024-12-11T02:28:12,559 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f14312bb11774fafb0060fe0ae308f64, entries=150, sequenceid=113, filesize=11.7 K 2024-12-11T02:28:12,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/fbbdb3d7d0cd4be1bd4e58376c1e76f8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fbbdb3d7d0cd4be1bd4e58376c1e76f8 2024-12-11T02:28:12,563 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 
{event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fbbdb3d7d0cd4be1bd4e58376c1e76f8, entries=150, sequenceid=113, filesize=11.7 K 2024-12-11T02:28:12,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/80dafb48879b4348abe9b4223814ffc1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/80dafb48879b4348abe9b4223814ffc1 2024-12-11T02:28:12,568 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/80dafb48879b4348abe9b4223814ffc1, entries=150, sequenceid=113, filesize=11.7 K 2024-12-11T02:28:12,569 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=6.71 KB/6870 for 513ab21b2f5fe75f43e6defd51fe8517 in 493ms, sequenceid=113, compaction requested=true 2024-12-11T02:28:12,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:12,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:12,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-11T02:28:12,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-11T02:28:12,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-11T02:28:12,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 648 msec 2024-12-11T02:28:12,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 651 msec 2024-12-11T02:28:12,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:12,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:28:12,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:12,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:12,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:12,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:12,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:12,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:12,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/18c0d2f85b3042869ffb47f2a7d1c6f9 is 50, key is test_row_0/A:col10/1733884092868/Put/seqid=0 2024-12-11T02:28:12,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742369_1545 (size=14341) 2024-12-11T02:28:13,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T02:28:13,028 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-11T02:28:13,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:13,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-11T02:28:13,031 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-11T02:28:13,031 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:13,031 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:13,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884153031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-11T02:28:13,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:13,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884153141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,182 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:13,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:13,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,183 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/18c0d2f85b3042869ffb47f2a7d1c6f9 2024-12-11T02:28:13,284 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/2a9759fbde8444d4b1ae2670c99b8291 is 50, key is test_row_0/B:col10/1733884092868/Put/seqid=0 2024-12-11T02:28:13,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742370_1546 (size=12001) 2024-12-11T02:28:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-11T02:28:13,335 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:13,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:13,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:13,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884153344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:13,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884153367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,369 DEBUG [Thread-2304 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:13,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:13,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884153372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,375 DEBUG [Thread-2310 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:13,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:13,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884153376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:13,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884153379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,384 DEBUG [Thread-2308 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:13,384 DEBUG [Thread-2312 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:13,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:13,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:13,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
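The RegionTooBusyException traces above are the region server's write backpressure: HRegion.checkResources rejects puts once the region's memstore passes its blocking limit (512.0 K in this run), and the client's retrying caller surfaces the failure once its deadline expires. A minimal, hypothetical sketch of a writer that backs off on this condition; the table, row, column family, retry count, and sleep values mirror the test only loosely and are illustrative, not taken from this log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L; // illustrative starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put); // the client also retries internally before giving up
          break;
        } catch (IOException e) {
          // In this log the root cause is RegionTooBusyException ("Over memstore limit");
          // it may arrive directly or wrapped by the retry machinery. Back off and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}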
2024-12-11T02:28:13,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-11T02:28:13,640 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884153654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/2a9759fbde8444d4b1ae2670c99b8291 2024-12-11T02:28:13,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/2b4e644cd74c4f07909d465d185d2c34 is 50, key is test_row_0/C:col10/1733884092868/Put/seqid=0 2024-12-11T02:28:13,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742371_1547 (size=12001) 2024-12-11T02:28:13,793 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:13,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
as already flushing 2024-12-11T02:28:13,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,946 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:13,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:13,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:13,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:13,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:13,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:14,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:14,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/2b4e644cd74c4f07909d465d185d2c34 2024-12-11T02:28:14,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/18c0d2f85b3042869ffb47f2a7d1c6f9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/18c0d2f85b3042869ffb47f2a7d1c6f9 2024-12-11T02:28:14,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/18c0d2f85b3042869ffb47f2a7d1c6f9, entries=200, sequenceid=124, filesize=14.0 K 2024-12-11T02:28:14,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/2a9759fbde8444d4b1ae2670c99b8291 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/2a9759fbde8444d4b1ae2670c99b8291 2024-12-11T02:28:14,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/2a9759fbde8444d4b1ae2670c99b8291, entries=150, 
sequenceid=124, filesize=11.7 K 2024-12-11T02:28:14,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/2b4e644cd74c4f07909d465d185d2c34 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2b4e644cd74c4f07909d465d185d2c34 2024-12-11T02:28:14,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2b4e644cd74c4f07909d465d185d2c34, entries=150, sequenceid=124, filesize=11.7 K 2024-12-11T02:28:14,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 513ab21b2f5fe75f43e6defd51fe8517 in 1253ms, sequenceid=124, compaction requested=true 2024-12-11T02:28:14,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:14,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:14,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:14,122 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:28:14,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:14,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:14,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:14,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:14,123 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:28:14,124 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 57762 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:28:14,124 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 55422 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:28:14,124 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] 
regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/A is initiating minor compaction (all files) 2024-12-11T02:28:14,124 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/B is initiating minor compaction (all files) 2024-12-11T02:28:14,124 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/A in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,124 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/B in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,124 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/7dfe29bed2df437783b4149eff781823, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/793739044bd74de5bb74d114595baff9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/328d649e82d74611884e9f54cefa69f4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f14312bb11774fafb0060fe0ae308f64, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/18c0d2f85b3042869ffb47f2a7d1c6f9] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=56.4 K 2024-12-11T02:28:14,125 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bdc69716e44a41cda318ac3005a83280, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/cb67d1ca61754cd6a1340dcbc9fc78fb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/7f600d754e06465189af1c93246eee75, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fbbdb3d7d0cd4be1bd4e58376c1e76f8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/2a9759fbde8444d4b1ae2670c99b8291] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=54.1 K 2024-12-11T02:28:14,125 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7dfe29bed2df437783b4149eff781823, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884088615 2024-12-11T02:28:14,125 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 
{}] compactions.Compactor(224): Compacting bdc69716e44a41cda318ac3005a83280, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884088615 2024-12-11T02:28:14,125 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting cb67d1ca61754cd6a1340dcbc9fc78fb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733884089818 2024-12-11T02:28:14,125 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 793739044bd74de5bb74d114595baff9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733884089818 2024-12-11T02:28:14,126 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 328d649e82d74611884e9f54cefa69f4, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733884090966 2024-12-11T02:28:14,126 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f600d754e06465189af1c93246eee75, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733884090966 2024-12-11T02:28:14,126 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting f14312bb11774fafb0060fe0ae308f64, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733884091059 2024-12-11T02:28:14,126 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting fbbdb3d7d0cd4be1bd4e58376c1e76f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733884091059 2024-12-11T02:28:14,126 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a9759fbde8444d4b1ae2670c99b8291, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884092847 2024-12-11T02:28:14,126 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18c0d2f85b3042869ffb47f2a7d1c6f9, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884092835 2024-12-11T02:28:14,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-11T02:28:14,134 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#B#compaction#462 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:14,135 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/e7413a5cd0604890bb584268abcd19c8 is 50, key is test_row_0/B:col10/1733884092868/Put/seqid=0 2024-12-11T02:28:14,135 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#A#compaction#463 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:14,136 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/d11b0432482b41ba810f251eee3f7aac is 50, key is test_row_0/A:col10/1733884092868/Put/seqid=0 2024-12-11T02:28:14,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742372_1548 (size=12275) 2024-12-11T02:28:14,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742373_1549 (size=12275) 2024-12-11T02:28:14,146 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/d11b0432482b41ba810f251eee3f7aac as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d11b0432482b41ba810f251eee3f7aac 2024-12-11T02:28:14,150 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/A of 513ab21b2f5fe75f43e6defd51fe8517 into d11b0432482b41ba810f251eee3f7aac(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:14,150 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:14,150 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/A, priority=11, startTime=1733884094122; duration=0sec 2024-12-11T02:28:14,151 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:14,151 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:A 2024-12-11T02:28:14,151 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:28:14,152 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 55422 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:28:14,152 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/C is initiating minor compaction (all files) 2024-12-11T02:28:14,152 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/C in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
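The SortedCompactionPolicy/ExploringCompactionPolicy entries above show each store (A, B, and next C) reaching five eligible HFiles after the flush and being selected for a minor compaction of all files. The selection thresholds, and the memstore limits behind the "Over memstore limit=512.0 K" rejections, are ordinary configuration knobs; a hypothetical tuning sketch with defaults-style values, not the settings this test actually uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of store files before a minor compaction is considered (default 3).
    conf.setInt("hbase.hstore.compaction.min", 5);
    // Upper bound on files selected for a single minor compaction (default 10).
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Memstore flush size; the blocking limit reported in the log is derived from
    // this value times hbase.hregion.memstore.block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}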
2024-12-11T02:28:14,152 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/d895c101ea1d44e0b691d15e99cc382b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/22f7a44098ae4102bbddbefffe246328, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2e57ea4525344af2bda339360d149f8a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/80dafb48879b4348abe9b4223814ffc1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2b4e644cd74c4f07909d465d185d2c34] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=54.1 K 2024-12-11T02:28:14,153 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d895c101ea1d44e0b691d15e99cc382b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733884088615 2024-12-11T02:28:14,153 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22f7a44098ae4102bbddbefffe246328, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733884089818 2024-12-11T02:28:14,153 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e57ea4525344af2bda339360d149f8a, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733884090966 2024-12-11T02:28:14,153 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80dafb48879b4348abe9b4223814ffc1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733884091059 2024-12-11T02:28:14,154 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b4e644cd74c4f07909d465d185d2c34, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884092847 2024-12-11T02:28:14,161 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#C#compaction#464 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:14,161 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/1394bb57a55e4acbb357d861d2eb055c is 50, key is test_row_0/C:col10/1733884092868/Put/seqid=0 2024-12-11T02:28:14,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742374_1550 (size=12275) 2024-12-11T02:28:14,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:14,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T02:28:14,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:14,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:14,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:14,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:14,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:14,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:14,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/95162c97f6f64112bc4237ca353212b0 is 50, key is test_row_0/A:col10/1733884094168/Put/seqid=0 2024-12-11T02:28:14,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742375_1551 (size=14541) 2024-12-11T02:28:14,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:14,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884154210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,251 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:14,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:14,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:14,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:14,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884154318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:14,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:14,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:14,405 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:14,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884154522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,544 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/e7413a5cd0604890bb584268abcd19c8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7413a5cd0604890bb584268abcd19c8 2024-12-11T02:28:14,549 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/B of 513ab21b2f5fe75f43e6defd51fe8517 into e7413a5cd0604890bb584268abcd19c8(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:14,549 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:14,549 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/B, priority=11, startTime=1733884094122; duration=0sec 2024-12-11T02:28:14,549 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:14,549 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:B 2024-12-11T02:28:14,557 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:14,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:14,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:14,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:14,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,569 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/1394bb57a55e4acbb357d861d2eb055c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/1394bb57a55e4acbb357d861d2eb055c 2024-12-11T02:28:14,574 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/C of 513ab21b2f5fe75f43e6defd51fe8517 into 1394bb57a55e4acbb357d861d2eb055c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:14,574 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:14,574 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/C, priority=11, startTime=1733884094122; duration=0sec 2024-12-11T02:28:14,574 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:14,574 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:C 2024-12-11T02:28:14,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/95162c97f6f64112bc4237ca353212b0 2024-12-11T02:28:14,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0823c613c9ef4cce8156b200cf43d0a1 is 50, key is test_row_0/B:col10/1733884094168/Put/seqid=0 2024-12-11T02:28:14,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742376_1552 (size=12151) 2024-12-11T02:28:14,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0823c613c9ef4cce8156b200cf43d0a1 2024-12-11T02:28:14,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/14b0b4da6e5a4827a3b241efe407a7fc is 50, key is test_row_0/C:col10/1733884094168/Put/seqid=0 2024-12-11T02:28:14,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742377_1553 (size=12151) 2024-12-11T02:28:14,710 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:14,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
as already flushing 2024-12-11T02:28:14,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:14,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884154826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,863 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:14,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:14,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:14,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:14,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:14,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:14,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:15,015 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:15,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:15,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:15,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:15,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:15,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:15,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:15,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:15,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/14b0b4da6e5a4827a3b241efe407a7fc 2024-12-11T02:28:15,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/95162c97f6f64112bc4237ca353212b0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/95162c97f6f64112bc4237ca353212b0 2024-12-11T02:28:15,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/95162c97f6f64112bc4237ca353212b0, entries=200, sequenceid=151, filesize=14.2 K 2024-12-11T02:28:15,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0823c613c9ef4cce8156b200cf43d0a1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0823c613c9ef4cce8156b200cf43d0a1 2024-12-11T02:28:15,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0823c613c9ef4cce8156b200cf43d0a1, entries=150, sequenceid=151, filesize=11.9 K 2024-12-11T02:28:15,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/14b0b4da6e5a4827a3b241efe407a7fc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/14b0b4da6e5a4827a3b241efe407a7fc 2024-12-11T02:28:15,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/14b0b4da6e5a4827a3b241efe407a7fc, entries=150, sequenceid=151, filesize=11.9 K 2024-12-11T02:28:15,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 513ab21b2f5fe75f43e6defd51fe8517 in 869ms, sequenceid=151, compaction requested=false 2024-12-11T02:28:15,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:15,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-11T02:28:15,168 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:15,168 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-11T02:28:15,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:15,169 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-11T02:28:15,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:15,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:15,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:15,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:15,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:15,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:15,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/498d60dfc0b346439a1745aecf1a82a1 is 50, key is test_row_0/A:col10/1733884094178/Put/seqid=0 2024-12-11T02:28:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742378_1554 (size=12151) 2024-12-11T02:28:15,177 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/498d60dfc0b346439a1745aecf1a82a1 2024-12-11T02:28:15,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0686e10c445643268af8172aac9cc613 is 50, key is test_row_0/B:col10/1733884094178/Put/seqid=0 2024-12-11T02:28:15,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46759 is added to blk_1073742379_1555 (size=12151) 2024-12-11T02:28:15,201 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0686e10c445643268af8172aac9cc613 2024-12-11T02:28:15,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/92be1f6955bd461aa0f0e4afb5f2398c is 50, key is test_row_0/C:col10/1733884094178/Put/seqid=0 2024-12-11T02:28:15,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742380_1556 (size=12151) 2024-12-11T02:28:15,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:15,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:15,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:15,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884155451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:15,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884155558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:15,614 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/92be1f6955bd461aa0f0e4afb5f2398c 2024-12-11T02:28:15,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/498d60dfc0b346439a1745aecf1a82a1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/498d60dfc0b346439a1745aecf1a82a1 2024-12-11T02:28:15,620 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/498d60dfc0b346439a1745aecf1a82a1, entries=150, sequenceid=163, filesize=11.9 K 2024-12-11T02:28:15,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0686e10c445643268af8172aac9cc613 as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0686e10c445643268af8172aac9cc613 2024-12-11T02:28:15,624 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0686e10c445643268af8172aac9cc613, entries=150, sequenceid=163, filesize=11.9 K 2024-12-11T02:28:15,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/92be1f6955bd461aa0f0e4afb5f2398c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/92be1f6955bd461aa0f0e4afb5f2398c 2024-12-11T02:28:15,628 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/92be1f6955bd461aa0f0e4afb5f2398c, entries=150, sequenceid=163, filesize=11.9 K 2024-12-11T02:28:15,629 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 513ab21b2f5fe75f43e6defd51fe8517 in 460ms, sequenceid=163, compaction requested=true 2024-12-11T02:28:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-11T02:28:15,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-11T02:28:15,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-11T02:28:15,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5990 sec 2024-12-11T02:28:15,633 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.6030 sec 2024-12-11T02:28:15,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:15,770 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-11T02:28:15,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:15,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:15,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:15,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:15,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:15,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:15,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/8641a740fc654f95a5429c32b63dc82e is 50, key is test_row_0/A:col10/1733884095450/Put/seqid=0 2024-12-11T02:28:15,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742381_1557 (size=14541) 2024-12-11T02:28:15,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:15,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884155801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:15,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:15,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884155908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:16,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:16,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884156113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:16,177 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/8641a740fc654f95a5429c32b63dc82e 2024-12-11T02:28:16,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0170efd5633740e082cdf44e8e4518a9 is 50, key is test_row_0/B:col10/1733884095450/Put/seqid=0 2024-12-11T02:28:16,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742382_1558 (size=12151) 2024-12-11T02:28:16,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:16,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884156418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:16,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0170efd5633740e082cdf44e8e4518a9 2024-12-11T02:28:16,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/bcfa1a6b19a24fb1a9d2c285d28e1964 is 50, key is test_row_0/C:col10/1733884095450/Put/seqid=0 2024-12-11T02:28:16,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742383_1559 (size=12151) 2024-12-11T02:28:16,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:16,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884156929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/bcfa1a6b19a24fb1a9d2c285d28e1964 2024-12-11T02:28:17,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/8641a740fc654f95a5429c32b63dc82e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/8641a740fc654f95a5429c32b63dc82e 2024-12-11T02:28:17,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/8641a740fc654f95a5429c32b63dc82e, entries=200, sequenceid=190, filesize=14.2 K 2024-12-11T02:28:17,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/0170efd5633740e082cdf44e8e4518a9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0170efd5633740e082cdf44e8e4518a9 2024-12-11T02:28:17,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0170efd5633740e082cdf44e8e4518a9, entries=150, sequenceid=190, filesize=11.9 K 2024-12-11T02:28:17,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/bcfa1a6b19a24fb1a9d2c285d28e1964 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bcfa1a6b19a24fb1a9d2c285d28e1964 2024-12-11T02:28:17,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bcfa1a6b19a24fb1a9d2c285d28e1964, entries=150, sequenceid=190, filesize=11.9 K 2024-12-11T02:28:17,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 513ab21b2f5fe75f43e6defd51fe8517 in 1248ms, sequenceid=190, compaction requested=true 2024-12-11T02:28:17,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:17,019 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:28:17,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:17,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:17,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:17,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:17,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:17,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:17,019 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:28:17,021 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53508 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:28:17,021 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/A is initiating minor compaction (all files) 2024-12-11T02:28:17,021 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/A in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:17,022 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d11b0432482b41ba810f251eee3f7aac, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/95162c97f6f64112bc4237ca353212b0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/498d60dfc0b346439a1745aecf1a82a1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/8641a740fc654f95a5429c32b63dc82e] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=52.3 K 2024-12-11T02:28:17,022 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48728 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:28:17,022 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/B is initiating minor compaction (all files) 2024-12-11T02:28:17,022 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/B in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:17,022 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7413a5cd0604890bb584268abcd19c8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0823c613c9ef4cce8156b200cf43d0a1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0686e10c445643268af8172aac9cc613, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0170efd5633740e082cdf44e8e4518a9] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=47.6 K 2024-12-11T02:28:17,023 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d11b0432482b41ba810f251eee3f7aac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884092847 2024-12-11T02:28:17,023 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e7413a5cd0604890bb584268abcd19c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884092847 2024-12-11T02:28:17,023 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95162c97f6f64112bc4237ca353212b0, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=151, 
earliestPutTs=1733884092994 2024-12-11T02:28:17,023 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0823c613c9ef4cce8156b200cf43d0a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1733884092994 2024-12-11T02:28:17,023 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 498d60dfc0b346439a1745aecf1a82a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733884094178 2024-12-11T02:28:17,023 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0686e10c445643268af8172aac9cc613, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733884094178 2024-12-11T02:28:17,024 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8641a740fc654f95a5429c32b63dc82e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733884095439 2024-12-11T02:28:17,024 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0170efd5633740e082cdf44e8e4518a9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733884095439 2024-12-11T02:28:17,034 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#A#compaction#474 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:17,035 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/4d125262ae9b496999163357a3c303be is 50, key is test_row_0/A:col10/1733884095450/Put/seqid=0 2024-12-11T02:28:17,043 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#B#compaction#475 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:17,043 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/30cc0e256bd44fd4b827d7261ac4e601 is 50, key is test_row_0/B:col10/1733884095450/Put/seqid=0 2024-12-11T02:28:17,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742384_1560 (size=12561) 2024-12-11T02:28:17,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742385_1561 (size=12561) 2024-12-11T02:28:17,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-11T02:28:17,135 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-11T02:28:17,136 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:17,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-11T02:28:17,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-11T02:28:17,138 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:17,139 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:17,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:17,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-11T02:28:17,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-11T02:28:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:17,291 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-11T02:28:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:17,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/814fc6d6d7304b0796e3c331033a911a is 50, key is test_row_0/A:col10/1733884095793/Put/seqid=0 2024-12-11T02:28:17,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742386_1562 (size=12151) 2024-12-11T02:28:17,300 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/814fc6d6d7304b0796e3c331033a911a 2024-12-11T02:28:17,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/04c47290510e464bafc615b11ccec373 is 50, key is test_row_0/B:col10/1733884095793/Put/seqid=0 2024-12-11T02:28:17,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742387_1563 (size=12151) 2024-12-11T02:28:17,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:17,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
as already flushing 2024-12-11T02:28:17,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-11T02:28:17,450 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/4d125262ae9b496999163357a3c303be as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d125262ae9b496999163357a3c303be 2024-12-11T02:28:17,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884157444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884157445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884157446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884157448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,455 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/A of 513ab21b2f5fe75f43e6defd51fe8517 into 4d125262ae9b496999163357a3c303be(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:17,455 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:17,455 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/A, priority=12, startTime=1733884097019; duration=0sec 2024-12-11T02:28:17,455 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:17,455 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:A 2024-12-11T02:28:17,455 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:28:17,457 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48728 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:28:17,457 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/C is initiating minor compaction (all files) 2024-12-11T02:28:17,457 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/C in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:17,457 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/1394bb57a55e4acbb357d861d2eb055c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/14b0b4da6e5a4827a3b241efe407a7fc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/92be1f6955bd461aa0f0e4afb5f2398c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bcfa1a6b19a24fb1a9d2c285d28e1964] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=47.6 K 2024-12-11T02:28:17,458 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1394bb57a55e4acbb357d861d2eb055c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733884092847 2024-12-11T02:28:17,458 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14b0b4da6e5a4827a3b241efe407a7fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1733884092994 2024-12-11T02:28:17,458 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92be1f6955bd461aa0f0e4afb5f2398c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733884094178 2024-12-11T02:28:17,458 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcfa1a6b19a24fb1a9d2c285d28e1964, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733884095439 2024-12-11T02:28:17,466 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#C#compaction#478 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:17,466 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/f90a0054f2bf41429f7c4bbae4a77652 is 50, key is test_row_0/C:col10/1733884095450/Put/seqid=0 2024-12-11T02:28:17,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742388_1564 (size=12561) 2024-12-11T02:28:17,472 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/30cc0e256bd44fd4b827d7261ac4e601 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/30cc0e256bd44fd4b827d7261ac4e601 2024-12-11T02:28:17,477 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/B of 513ab21b2f5fe75f43e6defd51fe8517 into 30cc0e256bd44fd4b827d7261ac4e601(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:17,477 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:17,477 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/B, priority=12, startTime=1733884097019; duration=0sec 2024-12-11T02:28:17,477 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:17,477 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:B 2024-12-11T02:28:17,479 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/f90a0054f2bf41429f7c4bbae4a77652 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f90a0054f2bf41429f7c4bbae4a77652 2024-12-11T02:28:17,484 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/C of 513ab21b2f5fe75f43e6defd51fe8517 into f90a0054f2bf41429f7c4bbae4a77652(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:17,484 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:17,484 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/C, priority=12, startTime=1733884097019; duration=0sec 2024-12-11T02:28:17,484 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:17,484 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:C 2024-12-11T02:28:17,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884157552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884157555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884157555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884157556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,710 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/04c47290510e464bafc615b11ccec373 2024-12-11T02:28:17,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/bd72226452404169b0ab574eccbd80a3 is 50, key is test_row_0/C:col10/1733884095793/Put/seqid=0 2024-12-11T02:28:17,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742389_1565 (size=12151) 2024-12-11T02:28:17,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-11T02:28:17,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884157757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884157759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884157760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884157760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:17,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:17,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884157934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884158064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884158064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884158064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884158065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,121 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/bd72226452404169b0ab574eccbd80a3 2024-12-11T02:28:18,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/814fc6d6d7304b0796e3c331033a911a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/814fc6d6d7304b0796e3c331033a911a 2024-12-11T02:28:18,128 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/814fc6d6d7304b0796e3c331033a911a, entries=150, sequenceid=199, filesize=11.9 K 2024-12-11T02:28:18,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/04c47290510e464bafc615b11ccec373 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/04c47290510e464bafc615b11ccec373 2024-12-11T02:28:18,134 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/04c47290510e464bafc615b11ccec373, entries=150, sequenceid=199, filesize=11.9 K 2024-12-11T02:28:18,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/bd72226452404169b0ab574eccbd80a3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bd72226452404169b0ab574eccbd80a3 2024-12-11T02:28:18,138 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bd72226452404169b0ab574eccbd80a3, entries=150, sequenceid=199, filesize=11.9 K 2024-12-11T02:28:18,139 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 513ab21b2f5fe75f43e6defd51fe8517 in 848ms, sequenceid=199, compaction requested=false 2024-12-11T02:28:18,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:18,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:18,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-11T02:28:18,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-11T02:28:18,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-11T02:28:18,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0010 sec 2024-12-11T02:28:18,143 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.0060 sec 2024-12-11T02:28:18,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-11T02:28:18,240 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-11T02:28:18,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:18,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-11T02:28:18,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-11T02:28:18,243 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:18,244 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:18,244 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:18,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-11T02:28:18,395 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-11T02:28:18,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:18,396 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-11T02:28:18,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:18,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:18,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:18,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:18,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:18,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:18,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/89496b29cd1943428078150730bf0c8d is 50, key is test_row_0/A:col10/1733884097446/Put/seqid=0 2024-12-11T02:28:18,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46759 is added to blk_1073742390_1566 (size=12151) 2024-12-11T02:28:18,407 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/89496b29cd1943428078150730bf0c8d 2024-12-11T02:28:18,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/e7bd9c1b6e064319a5242a1256085ed4 is 50, key is test_row_0/B:col10/1733884097446/Put/seqid=0 2024-12-11T02:28:18,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742391_1567 (size=12151) 2024-12-11T02:28:18,438 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/e7bd9c1b6e064319a5242a1256085ed4 2024-12-11T02:28:18,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/6cd4a34133b943e197d5de5c8645fe03 is 50, key is test_row_0/C:col10/1733884097446/Put/seqid=0 2024-12-11T02:28:18,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742392_1568 (size=12151) 2024-12-11T02:28:18,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-11T02:28:18,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:18,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:18,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884158574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884158577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884158578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884158579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884158680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884158683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884158683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884158684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-11T02:28:18,864 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/6cd4a34133b943e197d5de5c8645fe03 2024-12-11T02:28:18,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/89496b29cd1943428078150730bf0c8d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/89496b29cd1943428078150730bf0c8d 2024-12-11T02:28:18,872 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/89496b29cd1943428078150730bf0c8d, entries=150, sequenceid=229, filesize=11.9 K 2024-12-11T02:28:18,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/e7bd9c1b6e064319a5242a1256085ed4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7bd9c1b6e064319a5242a1256085ed4 2024-12-11T02:28:18,875 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7bd9c1b6e064319a5242a1256085ed4, entries=150, sequenceid=229, filesize=11.9 K 2024-12-11T02:28:18,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/6cd4a34133b943e197d5de5c8645fe03 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/6cd4a34133b943e197d5de5c8645fe03 2024-12-11T02:28:18,879 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/6cd4a34133b943e197d5de5c8645fe03, entries=150, sequenceid=229, filesize=11.9 K 2024-12-11T02:28:18,880 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 513ab21b2f5fe75f43e6defd51fe8517 in 484ms, sequenceid=229, compaction requested=true 2024-12-11T02:28:18,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:18,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
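The recurring RegionTooBusyException warnings in this stretch of the log come from HRegion.checkResources rejecting writes while the region's memstore sits above its blocking limit (reported as 512.0 K) until flushes such as pid=135 and pid=137 complete. As a rough, non-authoritative sketch of the pieces involved: the blocking threshold is governed by the product of two standard HBase settings, and the FLUSH operations recorded for TestAcidGuarantees correspond to an Admin-level flush request. The flush-size and multiplier values below (128 KB and 4) are assumptions chosen only to reproduce the 512 K figure; they are not read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MemstoreLimitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values: a 128 KB per-region flush size with a block multiplier of 4
        // yields the 512 KB blocking limit reported by the warnings in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("Writes block above roughly " + blockingLimit + " bytes per region");

        // A client-side flush request, comparable to the FlushTableProcedure runs
        // (pid=134/136) triggered via HBaseAdmin for this table in the log above.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

The fresh callIds with later deadlines that appear on the same client connections (56780, 56796, 56812, 56826) after each warning are consistent with the client automatically retrying these transient failures rather than new application writes.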
2024-12-11T02:28:18,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-11T02:28:18,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-11T02:28:18,882 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-11T02:28:18,882 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 637 msec 2024-12-11T02:28:18,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 640 msec 2024-12-11T02:28:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:18,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:28:18,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:18,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:18,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:18,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:18,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:18,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:18,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/d2ff777e209c4a5fbab76e2e31aa354e is 50, key is test_row_0/A:col10/1733884098921/Put/seqid=0 2024-12-11T02:28:18,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742393_1569 (size=19321) 2024-12-11T02:28:18,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/d2ff777e209c4a5fbab76e2e31aa354e 2024-12-11T02:28:18,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/bef242684df74f37988999dc4e985f0d is 50, key is test_row_0/B:col10/1733884098921/Put/seqid=0 2024-12-11T02:28:18,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742394_1570 
(size=12151) 2024-12-11T02:28:18,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884158961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884158962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884158968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884158969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884159070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884159070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884159075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884159076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884159277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884159277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884159278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884159278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/bef242684df74f37988999dc4e985f0d 2024-12-11T02:28:19,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-11T02:28:19,346 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-11T02:28:19,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:19,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-11T02:28:19,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T02:28:19,349 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:19,350 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:19,350 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:19,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/cb60472ce4b145138f59f66c03411dc4 is 50, key is test_row_0/C:col10/1733884098921/Put/seqid=0 2024-12-11T02:28:19,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742395_1571 (size=12151) 
2024-12-11T02:28:19,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T02:28:19,502 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T02:28:19,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:19,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:19,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:19,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:19,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:19,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:19,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884159586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884159587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884159587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:19,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884159588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T02:28:19,655 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T02:28:19,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:19,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:19,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:19,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:19,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:19,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:19,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/cb60472ce4b145138f59f66c03411dc4 2024-12-11T02:28:19,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/d2ff777e209c4a5fbab76e2e31aa354e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d2ff777e209c4a5fbab76e2e31aa354e 2024-12-11T02:28:19,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d2ff777e209c4a5fbab76e2e31aa354e, entries=300, sequenceid=242, filesize=18.9 K 2024-12-11T02:28:19,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/bef242684df74f37988999dc4e985f0d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bef242684df74f37988999dc4e985f0d 2024-12-11T02:28:19,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bef242684df74f37988999dc4e985f0d, entries=150, sequenceid=242, filesize=11.9 K 2024-12-11T02:28:19,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/cb60472ce4b145138f59f66c03411dc4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cb60472ce4b145138f59f66c03411dc4 2024-12-11T02:28:19,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cb60472ce4b145138f59f66c03411dc4, entries=150, sequenceid=242, filesize=11.9 K 2024-12-11T02:28:19,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 513ab21b2f5fe75f43e6defd51fe8517 in 867ms, sequenceid=242, compaction requested=true 2024-12-11T02:28:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
513ab21b2f5fe75f43e6defd51fe8517:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:19,788 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:28:19,788 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:28:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:19,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:19,789 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:28:19,789 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56184 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:28:19,789 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/B is initiating minor compaction (all files) 2024-12-11T02:28:19,789 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/A is initiating minor compaction (all files) 2024-12-11T02:28:19,790 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/A in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:19,790 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d125262ae9b496999163357a3c303be, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/814fc6d6d7304b0796e3c331033a911a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/89496b29cd1943428078150730bf0c8d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d2ff777e209c4a5fbab76e2e31aa354e] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=54.9 K 2024-12-11T02:28:19,790 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/B in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:19,790 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/30cc0e256bd44fd4b827d7261ac4e601, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/04c47290510e464bafc615b11ccec373, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7bd9c1b6e064319a5242a1256085ed4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bef242684df74f37988999dc4e985f0d] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=47.9 K 2024-12-11T02:28:19,790 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d125262ae9b496999163357a3c303be, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733884095439 2024-12-11T02:28:19,790 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 30cc0e256bd44fd4b827d7261ac4e601, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733884095439 2024-12-11T02:28:19,790 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 814fc6d6d7304b0796e3c331033a911a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733884095788 2024-12-11T02:28:19,791 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 04c47290510e464bafc615b11ccec373, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733884095788 2024-12-11T02:28:19,791 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89496b29cd1943428078150730bf0c8d, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733884097434 2024-12-11T02:28:19,791 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2ff777e209c4a5fbab76e2e31aa354e, keycount=300, bloomtype=ROW, size=18.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733884098577 2024-12-11T02:28:19,791 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e7bd9c1b6e064319a5242a1256085ed4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733884097434 2024-12-11T02:28:19,791 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting bef242684df74f37988999dc4e985f0d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733884098577 2024-12-11T02:28:19,799 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#A#compaction#486 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:19,799 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/4d650aadeeba404ba47a2576cf703d53 is 50, key is test_row_0/A:col10/1733884098921/Put/seqid=0 2024-12-11T02:28:19,801 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#B#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:19,802 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/545ca0f289fb4a238eb14be8d03071c9 is 50, key is test_row_0/B:col10/1733884098921/Put/seqid=0 2024-12-11T02:28:19,807 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:19,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T02:28:19,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:19,808 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:28:19,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:19,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:19,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:19,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:19,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:19,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:19,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742396_1572 (size=12697) 2024-12-11T02:28:19,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/41d3261bf83e4d4a83d6b2dad2bd7216 is 50, key is test_row_0/A:col10/1733884098968/Put/seqid=0 2024-12-11T02:28:19,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742397_1573 (size=12697) 2024-12-11T02:28:19,823 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/4d650aadeeba404ba47a2576cf703d53 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d650aadeeba404ba47a2576cf703d53 2024-12-11T02:28:19,829 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/A of 513ab21b2f5fe75f43e6defd51fe8517 into 4d650aadeeba404ba47a2576cf703d53(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:19,829 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:19,829 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/A, priority=12, startTime=1733884099788; duration=0sec 2024-12-11T02:28:19,829 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:19,829 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:A 2024-12-11T02:28:19,829 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:28:19,830 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:28:19,830 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/C is initiating minor compaction (all files) 2024-12-11T02:28:19,831 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/C in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:19,831 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f90a0054f2bf41429f7c4bbae4a77652, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bd72226452404169b0ab574eccbd80a3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/6cd4a34133b943e197d5de5c8645fe03, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cb60472ce4b145138f59f66c03411dc4] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=47.9 K 2024-12-11T02:28:19,831 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting f90a0054f2bf41429f7c4bbae4a77652, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733884095439 2024-12-11T02:28:19,831 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd72226452404169b0ab574eccbd80a3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733884095788 2024-12-11T02:28:19,831 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cd4a34133b943e197d5de5c8645fe03, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733884097434 2024-12-11T02:28:19,832 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb60472ce4b145138f59f66c03411dc4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733884098577 2024-12-11T02:28:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742398_1574 (size=12301) 2024-12-11T02:28:19,848 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#C#compaction#489 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:19,848 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/c9dede7260b94a13b499d8cf800c6bbc is 50, key is test_row_0/C:col10/1733884098921/Put/seqid=0 2024-12-11T02:28:19,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742399_1575 (size=12697) 2024-12-11T02:28:19,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:19,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:19,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T02:28:20,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:20,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884159998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:20,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884160091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884160092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884160094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884160094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:20,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884160105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,228 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/545ca0f289fb4a238eb14be8d03071c9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/545ca0f289fb4a238eb14be8d03071c9 2024-12-11T02:28:20,232 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/B of 513ab21b2f5fe75f43e6defd51fe8517 into 545ca0f289fb4a238eb14be8d03071c9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:20,232 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:20,232 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/B, priority=12, startTime=1733884099788; duration=0sec 2024-12-11T02:28:20,232 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:20,232 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:B 2024-12-11T02:28:20,245 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/41d3261bf83e4d4a83d6b2dad2bd7216 2024-12-11T02:28:20,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/170554fc147143af9d846ac1e44e296f is 50, key is test_row_0/B:col10/1733884098968/Put/seqid=0 2024-12-11T02:28:20,258 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/c9dede7260b94a13b499d8cf800c6bbc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/c9dede7260b94a13b499d8cf800c6bbc 2024-12-11T02:28:20,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742400_1576 (size=12301) 2024-12-11T02:28:20,259 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/170554fc147143af9d846ac1e44e296f 2024-12-11T02:28:20,265 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/C of 513ab21b2f5fe75f43e6defd51fe8517 into c9dede7260b94a13b499d8cf800c6bbc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:20,265 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:20,265 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/C, priority=12, startTime=1733884099788; duration=0sec 2024-12-11T02:28:20,266 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:20,266 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:C 2024-12-11T02:28:20,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/f9e917c7a96c483b954f57942facee4d is 50, key is test_row_0/C:col10/1733884098968/Put/seqid=0 2024-12-11T02:28:20,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742401_1577 (size=12301) 2024-12-11T02:28:20,273 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/f9e917c7a96c483b954f57942facee4d 2024-12-11T02:28:20,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/41d3261bf83e4d4a83d6b2dad2bd7216 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/41d3261bf83e4d4a83d6b2dad2bd7216 2024-12-11T02:28:20,285 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/41d3261bf83e4d4a83d6b2dad2bd7216, entries=150, sequenceid=265, filesize=12.0 K 2024-12-11T02:28:20,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/170554fc147143af9d846ac1e44e296f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/170554fc147143af9d846ac1e44e296f 2024-12-11T02:28:20,291 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/170554fc147143af9d846ac1e44e296f, entries=150, sequenceid=265, filesize=12.0 K 2024-12-11T02:28:20,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/f9e917c7a96c483b954f57942facee4d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f9e917c7a96c483b954f57942facee4d 2024-12-11T02:28:20,298 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f9e917c7a96c483b954f57942facee4d, entries=150, sequenceid=265, filesize=12.0 K 2024-12-11T02:28:20,298 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 513ab21b2f5fe75f43e6defd51fe8517 in 490ms, sequenceid=265, compaction requested=false 2024-12-11T02:28:20,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:20,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:20,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-11T02:28:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-11T02:28:20,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-11T02:28:20,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 950 msec 2024-12-11T02:28:20,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 954 msec 2024-12-11T02:28:20,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:20,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:28:20,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:20,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:20,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:20,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:20,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:20,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:20,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/a82bc06fe2284223829aac673cd03c6d is 50, key is test_row_0/A:col10/1733884099985/Put/seqid=0 2024-12-11T02:28:20,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742402_1578 (size=14741) 2024-12-11T02:28:20,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:20,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884160403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T02:28:20,453 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-11T02:28:20,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:20,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-11T02:28:20,455 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:20,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T02:28:20,456 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:20,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:20,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:20,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884160508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T02:28:20,607 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T02:28:20,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:20,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:20,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:20,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:20,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:20,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:20,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:20,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884160713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/a82bc06fe2284223829aac673cd03c6d 2024-12-11T02:28:20,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/12781ec311674d3abeb4e60857af0bfa is 50, key is test_row_0/B:col10/1733884099985/Put/seqid=0 2024-12-11T02:28:20,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742403_1579 (size=12301) 2024-12-11T02:28:20,741 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/12781ec311674d3abeb4e60857af0bfa 2024-12-11T02:28:20,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/32899181628d46dabf6b4d3aa74c04ab is 50, key is test_row_0/C:col10/1733884099985/Put/seqid=0 2024-12-11T02:28:20,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742404_1580 (size=12301) 2024-12-11T02:28:20,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T02:28:20,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T02:28:20,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:20,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:20,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:20,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:20,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:20,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:20,913 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:20,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T02:28:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:20,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:20,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:20,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:21,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:21,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884161017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:21,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T02:28:21,065 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:21,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T02:28:21,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:21,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:21,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:21,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:21,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:21,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:21,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:21,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884161101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:21,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:21,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884161102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:21,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:21,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884161102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:21,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:21,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884161110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:21,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/32899181628d46dabf6b4d3aa74c04ab 2024-12-11T02:28:21,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/a82bc06fe2284223829aac673cd03c6d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/a82bc06fe2284223829aac673cd03c6d 2024-12-11T02:28:21,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/a82bc06fe2284223829aac673cd03c6d, entries=200, sequenceid=282, filesize=14.4 K 2024-12-11T02:28:21,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/12781ec311674d3abeb4e60857af0bfa as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/12781ec311674d3abeb4e60857af0bfa 2024-12-11T02:28:21,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/12781ec311674d3abeb4e60857af0bfa, entries=150, sequenceid=282, filesize=12.0 K 2024-12-11T02:28:21,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/32899181628d46dabf6b4d3aa74c04ab as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/32899181628d46dabf6b4d3aa74c04ab 2024-12-11T02:28:21,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/32899181628d46dabf6b4d3aa74c04ab, entries=150, sequenceid=282, filesize=12.0 K 2024-12-11T02:28:21,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 513ab21b2f5fe75f43e6defd51fe8517 in 853ms, sequenceid=282, compaction requested=true 2024-12-11T02:28:21,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:21,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:21,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:21,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:21,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:21,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:21,168 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:21,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:21,169 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:21,169 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39739 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:21,169 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:21,169 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/A is initiating minor compaction (all files) 2024-12-11T02:28:21,169 DEBUG 
[RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/B is initiating minor compaction (all files) 2024-12-11T02:28:21,169 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/B in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:21,169 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/A in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:21,170 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d650aadeeba404ba47a2576cf703d53, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/41d3261bf83e4d4a83d6b2dad2bd7216, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/a82bc06fe2284223829aac673cd03c6d] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=38.8 K 2024-12-11T02:28:21,170 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/545ca0f289fb4a238eb14be8d03071c9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/170554fc147143af9d846ac1e44e296f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/12781ec311674d3abeb4e60857af0bfa] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=36.4 K 2024-12-11T02:28:21,170 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 545ca0f289fb4a238eb14be8d03071c9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733884098577 2024-12-11T02:28:21,170 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d650aadeeba404ba47a2576cf703d53, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733884098577 2024-12-11T02:28:21,170 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 170554fc147143af9d846ac1e44e296f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733884098960 2024-12-11T02:28:21,170 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41d3261bf83e4d4a83d6b2dad2bd7216, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733884098960 2024-12-11T02:28:21,170 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 
12781ec311674d3abeb4e60857af0bfa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733884099985 2024-12-11T02:28:21,171 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting a82bc06fe2284223829aac673cd03c6d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733884099985 2024-12-11T02:28:21,180 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#B#compaction#496 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:21,180 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#A#compaction#495 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:21,181 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/8f277de711164713afbf644b3d846e8f is 50, key is test_row_0/B:col10/1733884099985/Put/seqid=0 2024-12-11T02:28:21,181 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/40831c05dd724217b8c1c22ed9028743 is 50, key is test_row_0/A:col10/1733884099985/Put/seqid=0 2024-12-11T02:28:21,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742406_1582 (size=12949) 2024-12-11T02:28:21,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742405_1581 (size=12949) 2024-12-11T02:28:21,189 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/8f277de711164713afbf644b3d846e8f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/8f277de711164713afbf644b3d846e8f 2024-12-11T02:28:21,189 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/40831c05dd724217b8c1c22ed9028743 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/40831c05dd724217b8c1c22ed9028743 2024-12-11T02:28:21,195 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/B of 513ab21b2f5fe75f43e6defd51fe8517 into 8f277de711164713afbf644b3d846e8f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
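The PressureAwareThroughputController entries above ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") show compaction I/O being throttled per region server. As a minimal illustration, assuming the standard HBase throughput keys, the ceiling can be tuned as below; the byte values are examples only, not values read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative sketch only: real HBase configuration keys, example values.
    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Upper bound on aggregate compaction write throughput per region server (bytes/sec).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
        // Lower bound applied when the server is under little memstore pressure.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 10L * 1024 * 1024);
        System.out.println(conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0));
      }
    }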
2024-12-11T02:28:21,195 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:21,195 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/B, priority=13, startTime=1733884101168; duration=0sec 2024-12-11T02:28:21,195 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/A of 513ab21b2f5fe75f43e6defd51fe8517 into 40831c05dd724217b8c1c22ed9028743(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:21,195 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:21,195 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/A, priority=13, startTime=1733884101168; duration=0sec 2024-12-11T02:28:21,195 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:21,195 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:B 2024-12-11T02:28:21,195 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:21,195 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:A 2024-12-11T02:28:21,195 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:21,196 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:21,196 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/C is initiating minor compaction (all files) 2024-12-11T02:28:21,196 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/C in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
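The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines reflect the store-level thresholds that SortedCompactionPolicy and ExploringCompactionPolicy evaluate. A brief sketch using the standard configuration keys follows; the values shown are common defaults given for illustration, not read from this cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative sketch only: the keys are standard HBase store/compaction settings;
    // the values are examples, not necessarily what this test run used.
    public class CompactionSelectionSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is selected
        // ("3 eligible" above).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on files folded into a single minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store file count at which writes to the region are blocked ("16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }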
2024-12-11T02:28:21,196 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/c9dede7260b94a13b499d8cf800c6bbc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f9e917c7a96c483b954f57942facee4d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/32899181628d46dabf6b4d3aa74c04ab] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=36.4 K 2024-12-11T02:28:21,196 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting c9dede7260b94a13b499d8cf800c6bbc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1733884098577 2024-12-11T02:28:21,197 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f9e917c7a96c483b954f57942facee4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733884098960 2024-12-11T02:28:21,197 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 32899181628d46dabf6b4d3aa74c04ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733884099985 2024-12-11T02:28:21,202 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#C#compaction#497 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:21,203 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/ead2f317af844393a07df314e1582004 is 50, key is test_row_0/C:col10/1733884099985/Put/seqid=0 2024-12-11T02:28:21,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742407_1583 (size=12949) 2024-12-11T02:28:21,218 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:21,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T02:28:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
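The recurring "Over memstore limit=512.0 K" rejections come from HRegion.checkResources, which blocks writes once a region's memstore grows past the flush size multiplied by the block multiplier. The keys in the sketch below are the real settings involved; the particular values (128 KB x 4 = 512 KB) are an assumption about how this test arrived at its 512 K limit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch of how the blocking memstore limit is derived; values are assumed, keys are real.
    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // flush threshold (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // blocking multiplier (assumed)
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288
      }
    }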
2024-12-11T02:28:21,219 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T02:28:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:21,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:21,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/e4ba8d38ee9248f4901d04ce2822b54e is 50, key is test_row_0/A:col10/1733884100400/Put/seqid=0 2024-12-11T02:28:21,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742408_1584 (size=12301) 2024-12-11T02:28:21,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:21,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T02:28:21,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:21,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884161593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:21,611 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/ead2f317af844393a07df314e1582004 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/ead2f317af844393a07df314e1582004 2024-12-11T02:28:21,615 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/C of 513ab21b2f5fe75f43e6defd51fe8517 into ead2f317af844393a07df314e1582004(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
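Each RegionTooBusyException above is propagated back to the writing client, which normally retries it internally (subject to hbase.client.retries.number and hbase.client.pause). The sketch below is hypothetical handling code, not part of this test, showing what such a rejection means for a caller doing its own backoff while the flush drains the memstore.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical client-side sketch: retry a put with backoff when the region reports
    // it is over its memstore limit.
    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException busy) {
              // Region memstore is over its blocking limit; wait for the flush to catch up.
              Thread.sleep(200L * (attempt + 1));
            }
          }
        }
      }
    }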
2024-12-11T02:28:21,615 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:21,615 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/C, priority=13, startTime=1733884101168; duration=0sec 2024-12-11T02:28:21,615 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:21,615 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:C 2024-12-11T02:28:21,628 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/e4ba8d38ee9248f4901d04ce2822b54e 2024-12-11T02:28:21,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/a94bbc19ccc24acf93aefa325532cffa is 50, key is test_row_0/B:col10/1733884100400/Put/seqid=0 2024-12-11T02:28:21,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742409_1585 (size=12301) 2024-12-11T02:28:21,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:21,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884161699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:21,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:21,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884161903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:22,038 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/a94bbc19ccc24acf93aefa325532cffa 2024-12-11T02:28:22,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/f5ded79c4aee484ab683420143479ef4 is 50, key is test_row_0/C:col10/1733884100400/Put/seqid=0 2024-12-11T02:28:22,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742410_1586 (size=12301) 2024-12-11T02:28:22,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:22,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884162209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:22,450 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/f5ded79c4aee484ab683420143479ef4 2024-12-11T02:28:22,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/e4ba8d38ee9248f4901d04ce2822b54e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e4ba8d38ee9248f4901d04ce2822b54e 2024-12-11T02:28:22,462 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e4ba8d38ee9248f4901d04ce2822b54e, entries=150, sequenceid=306, filesize=12.0 K 2024-12-11T02:28:22,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/a94bbc19ccc24acf93aefa325532cffa as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a94bbc19ccc24acf93aefa325532cffa 2024-12-11T02:28:22,466 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a94bbc19ccc24acf93aefa325532cffa, entries=150, sequenceid=306, filesize=12.0 K 2024-12-11T02:28:22,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/f5ded79c4aee484ab683420143479ef4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f5ded79c4aee484ab683420143479ef4 2024-12-11T02:28:22,470 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f5ded79c4aee484ab683420143479ef4, entries=150, sequenceid=306, filesize=12.0 K 2024-12-11T02:28:22,471 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 513ab21b2f5fe75f43e6defd51fe8517 in 1252ms, sequenceid=306, compaction requested=false 2024-12-11T02:28:22,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:22,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:22,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-11T02:28:22,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-11T02:28:22,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-11T02:28:22,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0160 sec 2024-12-11T02:28:22,474 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 2.0190 sec 2024-12-11T02:28:22,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T02:28:22,559 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-11T02:28:22,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:22,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-11T02:28:22,562 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
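The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request and the FlushTableProcedure / FlushRegionProcedure pairs it spawns (pid=140/141 above, pid=142/143 here) correspond to a client-side Admin flush call. A minimal sketch of that call, assuming a reachable cluster and default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Minimal sketch: Admin.flush drives the master-side flush procedure for the table
    // and returns once the master reports the procedure complete.
    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }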
2024-12-11T02:28:22,563 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:22,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T02:28:22,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T02:28:22,715 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:22,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T02:28:22,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:22,716 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:28:22,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:22,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:22,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:22,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:22,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:22,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:22,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
as already flushing 2024-12-11T02:28:22,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:22,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/e34cca3f02984327a669201ba84f18c5 is 50, key is test_row_0/A:col10/1733884101587/Put/seqid=0 2024-12-11T02:28:22,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742411_1587 (size=12301) 2024-12-11T02:28:22,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:22,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884162822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:22,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T02:28:22,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:22,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884162928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:23,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:23,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884163111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:23,115 DEBUG [Thread-2310 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:23,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:23,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884163115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:23,117 DEBUG [Thread-2312 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:23,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:23,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884163118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:23,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:23,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884163118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:23,122 DEBUG [Thread-2304 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:23,122 DEBUG [Thread-2308 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:23,125 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/e34cca3f02984327a669201ba84f18c5 2024-12-11T02:28:23,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/a38abc5bfdd44fa1af27183a104650b6 is 50, key is test_row_0/B:col10/1733884101587/Put/seqid=0 2024-12-11T02:28:23,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:23,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884163134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:23,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742412_1588 (size=12301) 2024-12-11T02:28:23,139 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/a38abc5bfdd44fa1af27183a104650b6 2024-12-11T02:28:23,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/bc101f2787ba48bca8708d2462912ed1 is 50, key is test_row_0/C:col10/1733884101587/Put/seqid=0 2024-12-11T02:28:23,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742413_1589 (size=12301) 2024-12-11T02:28:23,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T02:28:23,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:23,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884163439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:23,551 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/bc101f2787ba48bca8708d2462912ed1 2024-12-11T02:28:23,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/e34cca3f02984327a669201ba84f18c5 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e34cca3f02984327a669201ba84f18c5 2024-12-11T02:28:23,558 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e34cca3f02984327a669201ba84f18c5, entries=150, sequenceid=321, filesize=12.0 K 2024-12-11T02:28:23,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/a38abc5bfdd44fa1af27183a104650b6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a38abc5bfdd44fa1af27183a104650b6 2024-12-11T02:28:23,562 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a38abc5bfdd44fa1af27183a104650b6, entries=150, sequenceid=321, filesize=12.0 K 2024-12-11T02:28:23,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/bc101f2787ba48bca8708d2462912ed1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bc101f2787ba48bca8708d2462912ed1 2024-12-11T02:28:23,567 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bc101f2787ba48bca8708d2462912ed1, entries=150, sequenceid=321, filesize=12.0 K 2024-12-11T02:28:23,567 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 513ab21b2f5fe75f43e6defd51fe8517 in 851ms, sequenceid=321, compaction requested=true 2024-12-11T02:28:23,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:23,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:23,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-11T02:28:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-11T02:28:23,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-11T02:28:23,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0060 sec 2024-12-11T02:28:23,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.0090 sec 2024-12-11T02:28:23,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T02:28:23,675 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-11T02:28:23,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:23,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-11T02:28:23,678 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-12-11T02:28:23,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-11T02:28:23,678 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:23,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:23,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-11T02:28:23,830 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:23,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-11T02:28:23,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:23,831 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T02:28:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/c40f9cbe543046eca39a1370496df906 is 50, key is test_row_0/A:col10/1733884102821/Put/seqid=0 2024-12-11T02:28:23,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 
is added to blk_1073742414_1590 (size=12301) 2024-12-11T02:28:23,840 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/c40f9cbe543046eca39a1370496df906 2024-12-11T02:28:23,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/63b7b3e21465495eb36d75c9dd28c7e1 is 50, key is test_row_0/B:col10/1733884102821/Put/seqid=0 2024-12-11T02:28:23,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742415_1591 (size=12301) 2024-12-11T02:28:23,855 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/63b7b3e21465495eb36d75c9dd28c7e1 2024-12-11T02:28:23,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/84519cac012c4ea6b984006ea4797f75 is 50, key is test_row_0/C:col10/1733884102821/Put/seqid=0 2024-12-11T02:28:23,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742416_1592 (size=12301) 2024-12-11T02:28:23,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:23,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:23,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-11T02:28:24,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:24,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884164002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:24,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:24,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884164110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:24,270 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/84519cac012c4ea6b984006ea4797f75 2024-12-11T02:28:24,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/c40f9cbe543046eca39a1370496df906 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/c40f9cbe543046eca39a1370496df906 2024-12-11T02:28:24,278 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/c40f9cbe543046eca39a1370496df906, entries=150, sequenceid=343, filesize=12.0 K 2024-12-11T02:28:24,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/63b7b3e21465495eb36d75c9dd28c7e1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/63b7b3e21465495eb36d75c9dd28c7e1 2024-12-11T02:28:24,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-11T02:28:24,289 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/63b7b3e21465495eb36d75c9dd28c7e1, entries=150, sequenceid=343, filesize=12.0 K 2024-12-11T02:28:24,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/84519cac012c4ea6b984006ea4797f75 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/84519cac012c4ea6b984006ea4797f75 2024-12-11T02:28:24,293 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/84519cac012c4ea6b984006ea4797f75, entries=150, sequenceid=343, filesize=12.0 K 2024-12-11T02:28:24,294 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 513ab21b2f5fe75f43e6defd51fe8517 in 464ms, sequenceid=343, compaction requested=true 2024-12-11T02:28:24,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:24,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:24,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-11T02:28:24,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-11T02:28:24,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-11T02:28:24,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 617 msec 2024-12-11T02:28:24,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 620 msec 2024-12-11T02:28:24,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:24,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T02:28:24,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:24,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:24,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:24,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:24,316 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:24,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:24,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/20bcdba1a3d34e0f92da8714f411f523 is 50, key is test_row_0/A:col10/1733884103990/Put/seqid=0 2024-12-11T02:28:24,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742417_1593 (size=14741) 2024-12-11T02:28:24,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:24,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884164430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:24,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:24,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884164537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:24,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/20bcdba1a3d34e0f92da8714f411f523 2024-12-11T02:28:24,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/ee22de686aad4639a92a4cfc29245dae is 50, key is test_row_0/B:col10/1733884103990/Put/seqid=0 2024-12-11T02:28:24,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742418_1594 (size=12301) 2024-12-11T02:28:24,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/ee22de686aad4639a92a4cfc29245dae 2024-12-11T02:28:24,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/707fda4bca6d4263be83be0e459aa820 is 50, key is test_row_0/C:col10/1733884103990/Put/seqid=0 2024-12-11T02:28:24,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:24,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 310 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884164743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:24,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742419_1595 (size=12301) 2024-12-11T02:28:24,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-11T02:28:24,781 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-11T02:28:24,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:24,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-11T02:28:24,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T02:28:24,784 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:24,784 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:24,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T02:28:24,936 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:24,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-11T02:28:24,936 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:24,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:24,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:24,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:24,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:24,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:25,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:25,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 312 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884165050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:25,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T02:28:25,088 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:25,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-11T02:28:25,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:25,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:25,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:25,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:25,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:25,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:25,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/707fda4bca6d4263be83be0e459aa820 2024-12-11T02:28:25,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/20bcdba1a3d34e0f92da8714f411f523 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/20bcdba1a3d34e0f92da8714f411f523 2024-12-11T02:28:25,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/20bcdba1a3d34e0f92da8714f411f523, entries=200, sequenceid=358, filesize=14.4 K 2024-12-11T02:28:25,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/ee22de686aad4639a92a4cfc29245dae as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/ee22de686aad4639a92a4cfc29245dae 2024-12-11T02:28:25,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/ee22de686aad4639a92a4cfc29245dae, entries=150, sequenceid=358, filesize=12.0 K 2024-12-11T02:28:25,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/707fda4bca6d4263be83be0e459aa820 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/707fda4bca6d4263be83be0e459aa820 2024-12-11T02:28:25,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/707fda4bca6d4263be83be0e459aa820, entries=150, sequenceid=358, filesize=12.0 K 2024-12-11T02:28:25,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 513ab21b2f5fe75f43e6defd51fe8517 in 859ms, sequenceid=358, compaction requested=true 2024-12-11T02:28:25,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:25,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
513ab21b2f5fe75f43e6defd51fe8517:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:25,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:25,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:25,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:25,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:25,176 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:28:25,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:25,176 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:28:25,177 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 64593 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:28:25,177 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62153 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:28:25,177 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/B is initiating minor compaction (all files) 2024-12-11T02:28:25,177 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/A is initiating minor compaction (all files) 2024-12-11T02:28:25,177 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/B in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:25,177 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/A in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:25,177 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/8f277de711164713afbf644b3d846e8f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a94bbc19ccc24acf93aefa325532cffa, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a38abc5bfdd44fa1af27183a104650b6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/63b7b3e21465495eb36d75c9dd28c7e1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/ee22de686aad4639a92a4cfc29245dae] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=60.7 K 2024-12-11T02:28:25,177 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/40831c05dd724217b8c1c22ed9028743, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e4ba8d38ee9248f4901d04ce2822b54e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e34cca3f02984327a669201ba84f18c5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/c40f9cbe543046eca39a1370496df906, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/20bcdba1a3d34e0f92da8714f411f523] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=63.1 K 2024-12-11T02:28:25,178 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40831c05dd724217b8c1c22ed9028743, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733884099985 2024-12-11T02:28:25,178 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f277de711164713afbf644b3d846e8f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733884099985 2024-12-11T02:28:25,178 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4ba8d38ee9248f4901d04ce2822b54e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733884100394 2024-12-11T02:28:25,178 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a94bbc19ccc24acf93aefa325532cffa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733884100394 2024-12-11T02:28:25,179 DEBUG 
[RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a38abc5bfdd44fa1af27183a104650b6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733884101570 2024-12-11T02:28:25,179 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting e34cca3f02984327a669201ba84f18c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733884101570 2024-12-11T02:28:25,179 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 63b7b3e21465495eb36d75c9dd28c7e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733884102780 2024-12-11T02:28:25,179 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c40f9cbe543046eca39a1370496df906, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733884102780 2024-12-11T02:28:25,179 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20bcdba1a3d34e0f92da8714f411f523, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733884103984 2024-12-11T02:28:25,179 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ee22de686aad4639a92a4cfc29245dae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733884103984 2024-12-11T02:28:25,190 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#A#compaction#510 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:25,190 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/dcdac52ff04445179f562e35a4163bdc is 50, key is test_row_0/A:col10/1733884103990/Put/seqid=0 2024-12-11T02:28:25,193 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#B#compaction#511 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:25,194 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/9585914e51734012a7d4a76cf49312a1 is 50, key is test_row_0/B:col10/1733884103990/Put/seqid=0 2024-12-11T02:28:25,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742420_1596 (size=13119) 2024-12-11T02:28:25,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742421_1597 (size=13119) 2024-12-11T02:28:25,202 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/dcdac52ff04445179f562e35a4163bdc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/dcdac52ff04445179f562e35a4163bdc 2024-12-11T02:28:25,207 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/A of 513ab21b2f5fe75f43e6defd51fe8517 into dcdac52ff04445179f562e35a4163bdc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:25,207 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:25,207 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/A, priority=11, startTime=1733884105176; duration=0sec 2024-12-11T02:28:25,207 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:25,207 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:A 2024-12-11T02:28:25,207 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-11T02:28:25,209 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62153 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-11T02:28:25,209 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/C is initiating minor compaction (all files) 2024-12-11T02:28:25,209 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/C in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:25,209 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/ead2f317af844393a07df314e1582004, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f5ded79c4aee484ab683420143479ef4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bc101f2787ba48bca8708d2462912ed1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/84519cac012c4ea6b984006ea4797f75, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/707fda4bca6d4263be83be0e459aa820] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=60.7 K 2024-12-11T02:28:25,209 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting ead2f317af844393a07df314e1582004, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733884099985 2024-12-11T02:28:25,210 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5ded79c4aee484ab683420143479ef4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733884100394 2024-12-11T02:28:25,210 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc101f2787ba48bca8708d2462912ed1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733884101570 2024-12-11T02:28:25,210 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84519cac012c4ea6b984006ea4797f75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733884102780 2024-12-11T02:28:25,210 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 707fda4bca6d4263be83be0e459aa820, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733884103984 2024-12-11T02:28:25,219 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#C#compaction#512 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:25,219 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/46643e6f589a4de787c69a4abcae7428 is 50, key is test_row_0/C:col10/1733884103990/Put/seqid=0 2024-12-11T02:28:25,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742422_1598 (size=13119) 2024-12-11T02:28:25,241 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:25,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-11T02:28:25,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:25,242 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-11T02:28:25,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:25,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:25,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:25,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:25,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:25,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:25,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/93484a674ce94476b4fa3ea6f8a75e3c is 50, key is test_row_0/A:col10/1733884104411/Put/seqid=0 2024-12-11T02:28:25,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742423_1599 (size=12301) 2024-12-11T02:28:25,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=146 2024-12-11T02:28:25,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:25,603 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/9585914e51734012a7d4a76cf49312a1 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/9585914e51734012a7d4a76cf49312a1 2024-12-11T02:28:25,609 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/B of 513ab21b2f5fe75f43e6defd51fe8517 into 9585914e51734012a7d4a76cf49312a1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:25,609 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:25,609 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/B, priority=11, startTime=1733884105176; duration=0sec 2024-12-11T02:28:25,609 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:25,609 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:B 2024-12-11T02:28:25,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:25,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 326 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884165618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:25,630 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/46643e6f589a4de787c69a4abcae7428 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/46643e6f589a4de787c69a4abcae7428 2024-12-11T02:28:25,634 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/C of 513ab21b2f5fe75f43e6defd51fe8517 into 46643e6f589a4de787c69a4abcae7428(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:25,634 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:25,634 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/C, priority=11, startTime=1733884105176; duration=0sec 2024-12-11T02:28:25,634 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:25,634 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:C 2024-12-11T02:28:25,653 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/93484a674ce94476b4fa3ea6f8a75e3c 2024-12-11T02:28:25,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/603ebb45fb964cfca0881f401939087e is 50, key is test_row_0/B:col10/1733884104411/Put/seqid=0 2024-12-11T02:28:25,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742424_1600 (size=12301) 2024-12-11T02:28:25,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:25,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 328 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884165724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:25,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T02:28:25,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:25,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 330 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884165928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:26,063 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/603ebb45fb964cfca0881f401939087e 2024-12-11T02:28:26,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/4a96c3f05b764b40af160a1a70871840 is 50, key is test_row_0/C:col10/1733884104411/Put/seqid=0 2024-12-11T02:28:26,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742425_1601 (size=12301) 2024-12-11T02:28:26,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:26,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 332 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884166235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:26,474 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/4a96c3f05b764b40af160a1a70871840 2024-12-11T02:28:26,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/93484a674ce94476b4fa3ea6f8a75e3c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/93484a674ce94476b4fa3ea6f8a75e3c 2024-12-11T02:28:26,488 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/93484a674ce94476b4fa3ea6f8a75e3c, entries=150, sequenceid=380, filesize=12.0 K 2024-12-11T02:28:26,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/603ebb45fb964cfca0881f401939087e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/603ebb45fb964cfca0881f401939087e 2024-12-11T02:28:26,492 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/603ebb45fb964cfca0881f401939087e, entries=150, sequenceid=380, filesize=12.0 K 2024-12-11T02:28:26,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/4a96c3f05b764b40af160a1a70871840 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/4a96c3f05b764b40af160a1a70871840 2024-12-11T02:28:26,496 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/4a96c3f05b764b40af160a1a70871840, entries=150, sequenceid=380, filesize=12.0 K 2024-12-11T02:28:26,497 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 513ab21b2f5fe75f43e6defd51fe8517 in 1255ms, sequenceid=380, compaction requested=false 2024-12-11T02:28:26,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:26,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:26,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-12-11T02:28:26,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-12-11T02:28:26,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-11T02:28:26,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7150 sec 2024-12-11T02:28:26,501 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.7190 sec 2024-12-11T02:28:26,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:26,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T02:28:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:26,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/5fc925c9a8d143c7b64692c9098b7132 is 50, key is test_row_0/A:col10/1733884105616/Put/seqid=0 2024-12-11T02:28:26,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742426_1602 (size=14741) 2024-12-11T02:28:26,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/5fc925c9a8d143c7b64692c9098b7132 2024-12-11T02:28:26,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/d4dd9684fb6041c4a4f06746b7b43b02 is 50, key is test_row_0/B:col10/1733884105616/Put/seqid=0 2024-12-11T02:28:26,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742427_1603 (size=12301) 2024-12-11T02:28:26,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:26,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 352 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884166829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:26,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T02:28:26,887 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-11T02:28:26,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:26,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-12-11T02:28:26,890 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:26,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T02:28:26,891 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:26,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:26,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:26,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 354 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884166937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:26,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T02:28:27,042 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-11T02:28:27,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:27,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:27,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:27,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:27,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:27,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:27,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:27,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56796 deadline: 1733884167116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,118 DEBUG [Thread-2310 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:27,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:27,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56780 deadline: 1733884167134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,138 DEBUG [Thread-2308 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:27,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:27,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1733884167140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,143 DEBUG [Thread-2304 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8175 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:27,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:27,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 356 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884167144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:27,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56812 deadline: 1733884167146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,148 DEBUG [Thread-2312 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8187 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., hostname=5f57a24c5131,40311,1733883964600, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:27,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=398 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/d4dd9684fb6041c4a4f06746b7b43b02 2024-12-11T02:28:27,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/06fce957b9134b05a7a58dd060994a87 is 50, key is test_row_0/C:col10/1733884105616/Put/seqid=0 2024-12-11T02:28:27,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742428_1604 (size=12301) 2024-12-11T02:28:27,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/06fce957b9134b05a7a58dd060994a87 2024-12-11T02:28:27,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/5fc925c9a8d143c7b64692c9098b7132 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/5fc925c9a8d143c7b64692c9098b7132 2024-12-11T02:28:27,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T02:28:27,195 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-11T02:28:27,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:27,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:27,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:27,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:27,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:27,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/5fc925c9a8d143c7b64692c9098b7132, entries=200, sequenceid=398, filesize=14.4 K 2024-12-11T02:28:27,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/d4dd9684fb6041c4a4f06746b7b43b02 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/d4dd9684fb6041c4a4f06746b7b43b02 2024-12-11T02:28:27,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/d4dd9684fb6041c4a4f06746b7b43b02, entries=150, sequenceid=398, filesize=12.0 K 2024-12-11T02:28:27,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/06fce957b9134b05a7a58dd060994a87 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/06fce957b9134b05a7a58dd060994a87 2024-12-11T02:28:27,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/06fce957b9134b05a7a58dd060994a87, entries=150, sequenceid=398, filesize=12.0 K 
2024-12-11T02:28:27,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 513ab21b2f5fe75f43e6defd51fe8517 in 467ms, sequenceid=398, compaction requested=true 2024-12-11T02:28:27,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:27,210 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:27,210 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:27,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:27,212 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:27,212 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/B is initiating minor compaction (all files) 2024-12-11T02:28:27,212 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/B in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:27,212 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/9585914e51734012a7d4a76cf49312a1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/603ebb45fb964cfca0881f401939087e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/d4dd9684fb6041c4a4f06746b7b43b02] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=36.8 K 2024-12-11T02:28:27,212 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40161 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:27,212 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/A is initiating minor compaction (all files) 2024-12-11T02:28:27,212 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 9585914e51734012a7d4a76cf49312a1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733884103984 2024-12-11T02:28:27,212 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/A in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:27,212 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/dcdac52ff04445179f562e35a4163bdc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/93484a674ce94476b4fa3ea6f8a75e3c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/5fc925c9a8d143c7b64692c9098b7132] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=39.2 K 2024-12-11T02:28:27,213 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcdac52ff04445179f562e35a4163bdc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733884103984 2024-12-11T02:28:27,213 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 603ebb45fb964cfca0881f401939087e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1733884104399 2024-12-11T02:28:27,213 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d4dd9684fb6041c4a4f06746b7b43b02, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733884105574 2024-12-11T02:28:27,213 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93484a674ce94476b4fa3ea6f8a75e3c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1733884104399 2024-12-11T02:28:27,214 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fc925c9a8d143c7b64692c9098b7132, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733884105574 2024-12-11T02:28:27,220 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#A#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:27,221 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/bb4859127b384e7fa0d7355c25a1dba7 is 50, key is test_row_0/A:col10/1733884105616/Put/seqid=0 2024-12-11T02:28:27,223 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#B#compaction#520 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:27,224 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/fb488ea934de43a5bddfd4f0aa19224c is 50, key is test_row_0/B:col10/1733884105616/Put/seqid=0 2024-12-11T02:28:27,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742429_1605 (size=13221) 2024-12-11T02:28:27,232 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/bb4859127b384e7fa0d7355c25a1dba7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/bb4859127b384e7fa0d7355c25a1dba7 2024-12-11T02:28:27,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742430_1606 (size=13221) 2024-12-11T02:28:27,237 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/A of 513ab21b2f5fe75f43e6defd51fe8517 into bb4859127b384e7fa0d7355c25a1dba7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:27,237 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:27,237 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/A, priority=13, startTime=1733884107209; duration=0sec 2024-12-11T02:28:27,237 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:27,237 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:A 2024-12-11T02:28:27,237 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:27,238 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/fb488ea934de43a5bddfd4f0aa19224c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fb488ea934de43a5bddfd4f0aa19224c 2024-12-11T02:28:27,238 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:27,239 DEBUG 
[RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/C is initiating minor compaction (all files) 2024-12-11T02:28:27,239 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/C in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:27,239 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/46643e6f589a4de787c69a4abcae7428, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/4a96c3f05b764b40af160a1a70871840, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/06fce957b9134b05a7a58dd060994a87] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=36.8 K 2024-12-11T02:28:27,239 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46643e6f589a4de787c69a4abcae7428, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1733884103984 2024-12-11T02:28:27,240 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a96c3f05b764b40af160a1a70871840, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1733884104399 2024-12-11T02:28:27,241 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06fce957b9134b05a7a58dd060994a87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733884105574 2024-12-11T02:28:27,243 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/B of 513ab21b2f5fe75f43e6defd51fe8517 into fb488ea934de43a5bddfd4f0aa19224c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:27,243 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:27,243 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/B, priority=13, startTime=1733884107210; duration=0sec 2024-12-11T02:28:27,244 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:27,244 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:B 2024-12-11T02:28:27,249 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#C#compaction#521 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:27,250 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/cd21596a3ccb455db0abc13d8380d981 is 50, key is test_row_0/C:col10/1733884105616/Put/seqid=0 2024-12-11T02:28:27,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742431_1607 (size=13221) 2024-12-11T02:28:27,271 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/cd21596a3ccb455db0abc13d8380d981 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cd21596a3ccb455db0abc13d8380d981 2024-12-11T02:28:27,277 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/C of 513ab21b2f5fe75f43e6defd51fe8517 into cd21596a3ccb455db0abc13d8380d981(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:27,277 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:27,277 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/C, priority=13, startTime=1733884107210; duration=0sec 2024-12-11T02:28:27,277 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:27,277 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:C 2024-12-11T02:28:27,349 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-11T02:28:27,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:27,349 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-11T02:28:27,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:27,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:27,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:27,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:27,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:27,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:27,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/1c4ac8955889487bbdb637e1840ee956 is 50, key is test_row_0/A:col10/1733884106791/Put/seqid=0 2024-12-11T02:28:27,359 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742432_1608 (size=12301) 2024-12-11T02:28:27,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:27,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. as already flushing 2024-12-11T02:28:27,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T02:28:27,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 371 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884167505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:27,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 373 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884167609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,760 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/1c4ac8955889487bbdb637e1840ee956 2024-12-11T02:28:27,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/e3c31c6f28824d80b40c16c0d22be015 is 50, key is test_row_0/B:col10/1733884106791/Put/seqid=0 2024-12-11T02:28:27,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742433_1609 (size=12301) 2024-12-11T02:28:27,775 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/e3c31c6f28824d80b40c16c0d22be015 2024-12-11T02:28:27,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/a48bdde0a8e744789ba2dc8223d4e84d is 50, key is test_row_0/C:col10/1733884106791/Put/seqid=0 2024-12-11T02:28:27,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742434_1610 (size=12301) 2024-12-11T02:28:27,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:27,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 375 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884167815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:27,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T02:28:28,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:28,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 377 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56850 deadline: 1733884168121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:28,191 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/a48bdde0a8e744789ba2dc8223d4e84d 2024-12-11T02:28:28,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/1c4ac8955889487bbdb637e1840ee956 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/1c4ac8955889487bbdb637e1840ee956 2024-12-11T02:28:28,198 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/1c4ac8955889487bbdb637e1840ee956, entries=150, sequenceid=421, filesize=12.0 K 2024-12-11T02:28:28,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/e3c31c6f28824d80b40c16c0d22be015 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e3c31c6f28824d80b40c16c0d22be015 2024-12-11T02:28:28,201 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e3c31c6f28824d80b40c16c0d22be015, entries=150, sequenceid=421, filesize=12.0 K 2024-12-11T02:28:28,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/a48bdde0a8e744789ba2dc8223d4e84d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/a48bdde0a8e744789ba2dc8223d4e84d 2024-12-11T02:28:28,209 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/a48bdde0a8e744789ba2dc8223d4e84d, entries=150, sequenceid=421, filesize=12.0 K 2024-12-11T02:28:28,210 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 513ab21b2f5fe75f43e6defd51fe8517 in 861ms, sequenceid=421, compaction requested=false 2024-12-11T02:28:28,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:28,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:28,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-12-11T02:28:28,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-12-11T02:28:28,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-11T02:28:28,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3200 sec 2024-12-11T02:28:28,214 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.3240 sec 2024-12-11T02:28:28,623 DEBUG [Thread-2317 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:63149 2024-12-11T02:28:28,623 DEBUG [Thread-2317 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:28,624 DEBUG [Thread-2323 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2cbfd84f to 127.0.0.1:63149 2024-12-11T02:28:28,624 DEBUG [Thread-2323 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:28,625 DEBUG [Thread-2315 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:63149 2024-12-11T02:28:28,625 DEBUG [Thread-2315 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:28,627 DEBUG [Thread-2321 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x635b1751 to 127.0.0.1:63149 2024-12-11T02:28:28,627 DEBUG [Thread-2321 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:28,628 DEBUG [Thread-2319 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79d49886 to 
127.0.0.1:63149 2024-12-11T02:28:28,628 DEBUG [Thread-2319 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:28,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:28,630 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-11T02:28:28,630 DEBUG [Thread-2306 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:63149 2024-12-11T02:28:28,630 DEBUG [Thread-2306 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:28,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:28,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:28,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:28,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:28,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:28,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:28,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/51a42fe28da547a4b6b3145692a5108f is 50, key is test_row_0/A:col10/1733884107489/Put/seqid=0 2024-12-11T02:28:28,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742435_1611 (size=12301) 2024-12-11T02:28:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T02:28:28,994 INFO [Thread-2314 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-11T02:28:29,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/51a42fe28da547a4b6b3145692a5108f 2024-12-11T02:28:29,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/b240a01ba44a475bb73b0c5fd6d87e2f is 50, key is test_row_0/B:col10/1733884107489/Put/seqid=0 2024-12-11T02:28:29,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742436_1612 (size=12301) 2024-12-11T02:28:29,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=438 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/b240a01ba44a475bb73b0c5fd6d87e2f 2024-12-11T02:28:29,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/37ae39a3cc0640f5ae0702c1e8e19d97 is 50, key is test_row_0/C:col10/1733884107489/Put/seqid=0 2024-12-11T02:28:29,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742437_1613 (size=12301) 2024-12-11T02:28:29,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/37ae39a3cc0640f5ae0702c1e8e19d97 2024-12-11T02:28:29,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/51a42fe28da547a4b6b3145692a5108f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/51a42fe28da547a4b6b3145692a5108f 2024-12-11T02:28:29,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/51a42fe28da547a4b6b3145692a5108f, entries=150, sequenceid=438, filesize=12.0 K 2024-12-11T02:28:29,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/b240a01ba44a475bb73b0c5fd6d87e2f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/b240a01ba44a475bb73b0c5fd6d87e2f 2024-12-11T02:28:29,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/b240a01ba44a475bb73b0c5fd6d87e2f, entries=150, sequenceid=438, filesize=12.0 K 2024-12-11T02:28:29,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/37ae39a3cc0640f5ae0702c1e8e19d97 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/37ae39a3cc0640f5ae0702c1e8e19d97 2024-12-11T02:28:29,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/37ae39a3cc0640f5ae0702c1e8e19d97, entries=150, sequenceid=438, filesize=12.0 K 2024-12-11T02:28:29,869 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=0 B/0 for 513ab21b2f5fe75f43e6defd51fe8517 in 1239ms, sequenceid=438, compaction requested=true 2024-12-11T02:28:29,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:29,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:29,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:29,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:29,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:29,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 513ab21b2f5fe75f43e6defd51fe8517:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:29,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:29,869 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:29,869 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:29,870 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:29,870 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:29,870 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/B is initiating minor compaction (all files) 2024-12-11T02:28:29,870 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/A is initiating minor compaction (all files) 2024-12-11T02:28:29,870 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/A in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:29,870 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/B in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:29,870 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/bb4859127b384e7fa0d7355c25a1dba7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/1c4ac8955889487bbdb637e1840ee956, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/51a42fe28da547a4b6b3145692a5108f] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=36.9 K 2024-12-11T02:28:29,870 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fb488ea934de43a5bddfd4f0aa19224c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e3c31c6f28824d80b40c16c0d22be015, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/b240a01ba44a475bb73b0c5fd6d87e2f] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=36.9 K 2024-12-11T02:28:29,870 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting fb488ea934de43a5bddfd4f0aa19224c, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733884105574 2024-12-11T02:28:29,870 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb4859127b384e7fa0d7355c25a1dba7, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733884105574 2024-12-11T02:28:29,871 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e3c31c6f28824d80b40c16c0d22be015, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1733884106791 2024-12-11T02:28:29,871 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c4ac8955889487bbdb637e1840ee956, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1733884106791 2024-12-11T02:28:29,871 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51a42fe28da547a4b6b3145692a5108f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1733884107489 2024-12-11T02:28:29,871 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b240a01ba44a475bb73b0c5fd6d87e2f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1733884107489 2024-12-11T02:28:29,876 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#B#compaction#528 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:29,876 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#A#compaction#529 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:29,877 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/5d8952da04a74fdbaf5e543e2a1cb52b is 50, key is test_row_0/A:col10/1733884107489/Put/seqid=0 2024-12-11T02:28:29,877 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/589cfc360dd34338b8b82924e3ab6390 is 50, key is test_row_0/B:col10/1733884107489/Put/seqid=0 2024-12-11T02:28:29,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742438_1614 (size=13323) 2024-12-11T02:28:29,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742439_1615 (size=13323) 2024-12-11T02:28:30,287 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/5d8952da04a74fdbaf5e543e2a1cb52b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/5d8952da04a74fdbaf5e543e2a1cb52b 2024-12-11T02:28:30,287 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/589cfc360dd34338b8b82924e3ab6390 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/589cfc360dd34338b8b82924e3ab6390 2024-12-11T02:28:30,291 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/A of 513ab21b2f5fe75f43e6defd51fe8517 into 5d8952da04a74fdbaf5e543e2a1cb52b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:30,291 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/B of 513ab21b2f5fe75f43e6defd51fe8517 into 589cfc360dd34338b8b82924e3ab6390(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:30,291 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:30,291 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:30,291 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/A, priority=13, startTime=1733884109869; duration=0sec 2024-12-11T02:28:30,291 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/B, priority=13, startTime=1733884109869; duration=0sec 2024-12-11T02:28:30,291 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:30,291 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:B 2024-12-11T02:28:30,291 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:30,291 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:A 2024-12-11T02:28:30,291 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:30,292 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:30,292 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): 513ab21b2f5fe75f43e6defd51fe8517/C is initiating minor compaction (all files) 2024-12-11T02:28:30,292 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 513ab21b2f5fe75f43e6defd51fe8517/C in TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:30,292 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cd21596a3ccb455db0abc13d8380d981, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/a48bdde0a8e744789ba2dc8223d4e84d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/37ae39a3cc0640f5ae0702c1e8e19d97] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp, totalSize=36.9 K 2024-12-11T02:28:30,293 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting cd21596a3ccb455db0abc13d8380d981, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733884105574 2024-12-11T02:28:30,293 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a48bdde0a8e744789ba2dc8223d4e84d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1733884106791 2024-12-11T02:28:30,293 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 37ae39a3cc0640f5ae0702c1e8e19d97, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1733884107489 2024-12-11T02:28:30,298 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 513ab21b2f5fe75f43e6defd51fe8517#C#compaction#530 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:30,298 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/375ba1bb14324722ad25ca9310332853 is 50, key is test_row_0/C:col10/1733884107489/Put/seqid=0 2024-12-11T02:28:30,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742440_1616 (size=13323) 2024-12-11T02:28:30,706 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/375ba1bb14324722ad25ca9310332853 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/375ba1bb14324722ad25ca9310332853 2024-12-11T02:28:30,709 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 513ab21b2f5fe75f43e6defd51fe8517/C of 513ab21b2f5fe75f43e6defd51fe8517 into 375ba1bb14324722ad25ca9310332853(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:30,709 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:30,709 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517., storeName=513ab21b2f5fe75f43e6defd51fe8517/C, priority=13, startTime=1733884109869; duration=0sec 2024-12-11T02:28:30,709 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:30,709 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 513ab21b2f5fe75f43e6defd51fe8517:C 2024-12-11T02:28:32,933 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T02:28:37,160 DEBUG [Thread-2308 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bc486e1 to 127.0.0.1:63149 2024-12-11T02:28:37,160 DEBUG [Thread-2308 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:37,173 DEBUG [Thread-2310 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2070263a to 127.0.0.1:63149 2024-12-11T02:28:37,173 DEBUG [Thread-2310 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:37,244 DEBUG [Thread-2304 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:63149 2024-12-11T02:28:37,244 DEBUG [Thread-2304 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:37,246 DEBUG [Thread-2312 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6050584c to 127.0.0.1:63149 2024-12-11T02:28:37,246 DEBUG [Thread-2312 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 17 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 254 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 29 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 19 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2458 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7374 rows 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2453 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7359 rows 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2451 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7353 rows 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2464 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7392 rows 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2457 2024-12-11T02:28:37,246 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7371 rows 2024-12-11T02:28:37,246 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T02:28:37,246 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:63149 2024-12-11T02:28:37,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:28:37,249 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T02:28:37,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T02:28:37,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:37,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T02:28:37,253 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884117253"}]},"ts":"1733884117253"} 2024-12-11T02:28:37,254 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T02:28:37,256 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T02:28:37,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T02:28:37,258 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=513ab21b2f5fe75f43e6defd51fe8517, UNASSIGN}] 2024-12-11T02:28:37,258 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=513ab21b2f5fe75f43e6defd51fe8517, UNASSIGN 2024-12-11T02:28:37,259 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=513ab21b2f5fe75f43e6defd51fe8517, regionState=CLOSING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:37,260 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T02:28:37,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure 513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:28:37,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T02:28:37,411 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:37,411 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:37,411 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T02:28:37,411 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing 513ab21b2f5fe75f43e6defd51fe8517, disabling compactions & flushes 2024-12-11T02:28:37,411 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:37,411 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:37,412 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. after waiting 0 ms 2024-12-11T02:28:37,412 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 
2024-12-11T02:28:37,412 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(2837): Flushing 513ab21b2f5fe75f43e6defd51fe8517 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-11T02:28:37,412 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=A 2024-12-11T02:28:37,412 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:37,412 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=B 2024-12-11T02:28:37,412 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:37,412 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 513ab21b2f5fe75f43e6defd51fe8517, store=C 2024-12-11T02:28:37,412 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:37,415 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/e1bfbe0e75d347eea83e393943a5ff65 is 50, key is test_row_1/A:col10/1733884117172/Put/seqid=0 2024-12-11T02:28:37,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742441_1617 (size=9857) 2024-12-11T02:28:37,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T02:28:37,819 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/e1bfbe0e75d347eea83e393943a5ff65 2024-12-11T02:28:37,825 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/1c2b75db83db466f9a527efa13d1f693 is 50, key is test_row_1/B:col10/1733884117172/Put/seqid=0 2024-12-11T02:28:37,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742442_1618 (size=9857) 2024-12-11T02:28:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T02:28:38,229 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 
{event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/1c2b75db83db466f9a527efa13d1f693 2024-12-11T02:28:38,234 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/fde0f85f7d164e6885d75cda80f8233a is 50, key is test_row_1/C:col10/1733884117172/Put/seqid=0 2024-12-11T02:28:38,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742443_1619 (size=9857) 2024-12-11T02:28:38,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T02:28:38,637 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/fde0f85f7d164e6885d75cda80f8233a 2024-12-11T02:28:38,641 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/A/e1bfbe0e75d347eea83e393943a5ff65 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e1bfbe0e75d347eea83e393943a5ff65 2024-12-11T02:28:38,644 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e1bfbe0e75d347eea83e393943a5ff65, entries=100, sequenceid=448, filesize=9.6 K 2024-12-11T02:28:38,644 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/B/1c2b75db83db466f9a527efa13d1f693 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/1c2b75db83db466f9a527efa13d1f693 2024-12-11T02:28:38,647 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/1c2b75db83db466f9a527efa13d1f693, entries=100, sequenceid=448, filesize=9.6 K 2024-12-11T02:28:38,647 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/.tmp/C/fde0f85f7d164e6885d75cda80f8233a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/fde0f85f7d164e6885d75cda80f8233a 2024-12-11T02:28:38,650 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/fde0f85f7d164e6885d75cda80f8233a, entries=100, sequenceid=448, filesize=9.6 K 2024-12-11T02:28:38,651 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 513ab21b2f5fe75f43e6defd51fe8517 in 1239ms, sequenceid=448, compaction requested=false 2024-12-11T02:28:38,651 DEBUG [StoreCloser-TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f38796b7f56043d28564960f362ec3dd, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/cf065a994b0a4fe48782b57705b2ca90, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/7dfe29bed2df437783b4149eff781823, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/9c048ed55b904f6692b3bdeeb21d0722, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/793739044bd74de5bb74d114595baff9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/328d649e82d74611884e9f54cefa69f4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f14312bb11774fafb0060fe0ae308f64, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/18c0d2f85b3042869ffb47f2a7d1c6f9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d11b0432482b41ba810f251eee3f7aac, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/95162c97f6f64112bc4237ca353212b0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/498d60dfc0b346439a1745aecf1a82a1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/8641a740fc654f95a5429c32b63dc82e, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d125262ae9b496999163357a3c303be, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/814fc6d6d7304b0796e3c331033a911a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/89496b29cd1943428078150730bf0c8d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d2ff777e209c4a5fbab76e2e31aa354e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d650aadeeba404ba47a2576cf703d53, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/41d3261bf83e4d4a83d6b2dad2bd7216, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/a82bc06fe2284223829aac673cd03c6d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/40831c05dd724217b8c1c22ed9028743, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e4ba8d38ee9248f4901d04ce2822b54e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e34cca3f02984327a669201ba84f18c5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/c40f9cbe543046eca39a1370496df906, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/20bcdba1a3d34e0f92da8714f411f523, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/dcdac52ff04445179f562e35a4163bdc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/93484a674ce94476b4fa3ea6f8a75e3c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/5fc925c9a8d143c7b64692c9098b7132, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/bb4859127b384e7fa0d7355c25a1dba7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/1c4ac8955889487bbdb637e1840ee956, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/51a42fe28da547a4b6b3145692a5108f] to archive 2024-12-11T02:28:38,652 DEBUG [StoreCloser-TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-11T02:28:38,654 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f38796b7f56043d28564960f362ec3dd to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f38796b7f56043d28564960f362ec3dd 2024-12-11T02:28:38,654 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/7dfe29bed2df437783b4149eff781823 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/7dfe29bed2df437783b4149eff781823 2024-12-11T02:28:38,654 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/cf065a994b0a4fe48782b57705b2ca90 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/cf065a994b0a4fe48782b57705b2ca90 2024-12-11T02:28:38,654 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/9c048ed55b904f6692b3bdeeb21d0722 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/9c048ed55b904f6692b3bdeeb21d0722 2024-12-11T02:28:38,656 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/793739044bd74de5bb74d114595baff9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/793739044bd74de5bb74d114595baff9 2024-12-11T02:28:38,657 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f14312bb11774fafb0060fe0ae308f64 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/f14312bb11774fafb0060fe0ae308f64 2024-12-11T02:28:38,657 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/328d649e82d74611884e9f54cefa69f4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/328d649e82d74611884e9f54cefa69f4 2024-12-11T02:28:38,657 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d11b0432482b41ba810f251eee3f7aac to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d11b0432482b41ba810f251eee3f7aac 2024-12-11T02:28:38,657 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/498d60dfc0b346439a1745aecf1a82a1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/498d60dfc0b346439a1745aecf1a82a1 2024-12-11T02:28:38,657 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/18c0d2f85b3042869ffb47f2a7d1c6f9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/18c0d2f85b3042869ffb47f2a7d1c6f9 2024-12-11T02:28:38,657 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/8641a740fc654f95a5429c32b63dc82e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/8641a740fc654f95a5429c32b63dc82e 2024-12-11T02:28:38,657 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/95162c97f6f64112bc4237ca353212b0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/95162c97f6f64112bc4237ca353212b0 2024-12-11T02:28:38,658 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d125262ae9b496999163357a3c303be to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d125262ae9b496999163357a3c303be 2024-12-11T02:28:38,658 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/814fc6d6d7304b0796e3c331033a911a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/814fc6d6d7304b0796e3c331033a911a 2024-12-11T02:28:38,658 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d2ff777e209c4a5fbab76e2e31aa354e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/d2ff777e209c4a5fbab76e2e31aa354e 2024-12-11T02:28:38,658 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/89496b29cd1943428078150730bf0c8d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/89496b29cd1943428078150730bf0c8d 2024-12-11T02:28:38,659 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d650aadeeba404ba47a2576cf703d53 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/4d650aadeeba404ba47a2576cf703d53 2024-12-11T02:28:38,659 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/41d3261bf83e4d4a83d6b2dad2bd7216 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/41d3261bf83e4d4a83d6b2dad2bd7216 2024-12-11T02:28:38,659 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/a82bc06fe2284223829aac673cd03c6d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/a82bc06fe2284223829aac673cd03c6d 2024-12-11T02:28:38,660 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e4ba8d38ee9248f4901d04ce2822b54e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e4ba8d38ee9248f4901d04ce2822b54e 2024-12-11T02:28:38,660 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/dcdac52ff04445179f562e35a4163bdc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/dcdac52ff04445179f562e35a4163bdc 2024-12-11T02:28:38,661 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e34cca3f02984327a669201ba84f18c5 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e34cca3f02984327a669201ba84f18c5 2024-12-11T02:28:38,661 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/c40f9cbe543046eca39a1370496df906 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/c40f9cbe543046eca39a1370496df906 2024-12-11T02:28:38,661 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/93484a674ce94476b4fa3ea6f8a75e3c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/93484a674ce94476b4fa3ea6f8a75e3c 2024-12-11T02:28:38,661 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/20bcdba1a3d34e0f92da8714f411f523 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/20bcdba1a3d34e0f92da8714f411f523 2024-12-11T02:28:38,661 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/5fc925c9a8d143c7b64692c9098b7132 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/5fc925c9a8d143c7b64692c9098b7132 2024-12-11T02:28:38,662 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/1c4ac8955889487bbdb637e1840ee956 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/1c4ac8955889487bbdb637e1840ee956 2024-12-11T02:28:38,662 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/bb4859127b384e7fa0d7355c25a1dba7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/bb4859127b384e7fa0d7355c25a1dba7 2024-12-11T02:28:38,662 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/51a42fe28da547a4b6b3145692a5108f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/51a42fe28da547a4b6b3145692a5108f 2024-12-11T02:28:38,664 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/40831c05dd724217b8c1c22ed9028743 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/40831c05dd724217b8c1c22ed9028743 2024-12-11T02:28:38,665 DEBUG [StoreCloser-TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0d2e6a4886634cc1bbb5317fdef00239, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fe8b3acd724b4c01858c5bfb04ce8ee7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bdc69716e44a41cda318ac3005a83280, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/03e98551e8cd40eea97ad14c122f0269, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/cb67d1ca61754cd6a1340dcbc9fc78fb, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/7f600d754e06465189af1c93246eee75, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fbbdb3d7d0cd4be1bd4e58376c1e76f8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7413a5cd0604890bb584268abcd19c8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/2a9759fbde8444d4b1ae2670c99b8291, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0823c613c9ef4cce8156b200cf43d0a1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0686e10c445643268af8172aac9cc613, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/30cc0e256bd44fd4b827d7261ac4e601, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0170efd5633740e082cdf44e8e4518a9, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/04c47290510e464bafc615b11ccec373, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7bd9c1b6e064319a5242a1256085ed4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/545ca0f289fb4a238eb14be8d03071c9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bef242684df74f37988999dc4e985f0d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/170554fc147143af9d846ac1e44e296f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/8f277de711164713afbf644b3d846e8f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/12781ec311674d3abeb4e60857af0bfa, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a94bbc19ccc24acf93aefa325532cffa, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a38abc5bfdd44fa1af27183a104650b6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/63b7b3e21465495eb36d75c9dd28c7e1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/9585914e51734012a7d4a76cf49312a1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/ee22de686aad4639a92a4cfc29245dae, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/603ebb45fb964cfca0881f401939087e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fb488ea934de43a5bddfd4f0aa19224c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/d4dd9684fb6041c4a4f06746b7b43b02, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e3c31c6f28824d80b40c16c0d22be015, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/b240a01ba44a475bb73b0c5fd6d87e2f] to archive 2024-12-11T02:28:38,666 DEBUG [StoreCloser-TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-11T02:28:38,667 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0d2e6a4886634cc1bbb5317fdef00239 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0d2e6a4886634cc1bbb5317fdef00239 2024-12-11T02:28:38,667 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/03e98551e8cd40eea97ad14c122f0269 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/03e98551e8cd40eea97ad14c122f0269 2024-12-11T02:28:38,667 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/7f600d754e06465189af1c93246eee75 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/7f600d754e06465189af1c93246eee75 2024-12-11T02:28:38,667 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bdc69716e44a41cda318ac3005a83280 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bdc69716e44a41cda318ac3005a83280 2024-12-11T02:28:38,672 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fe8b3acd724b4c01858c5bfb04ce8ee7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fe8b3acd724b4c01858c5bfb04ce8ee7 2024-12-11T02:28:38,672 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/cb67d1ca61754cd6a1340dcbc9fc78fb to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/cb67d1ca61754cd6a1340dcbc9fc78fb 2024-12-11T02:28:38,672 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7413a5cd0604890bb584268abcd19c8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7413a5cd0604890bb584268abcd19c8 2024-12-11T02:28:38,672 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fbbdb3d7d0cd4be1bd4e58376c1e76f8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fbbdb3d7d0cd4be1bd4e58376c1e76f8 2024-12-11T02:28:38,673 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/30cc0e256bd44fd4b827d7261ac4e601 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/30cc0e256bd44fd4b827d7261ac4e601 2024-12-11T02:28:38,673 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0823c613c9ef4cce8156b200cf43d0a1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0823c613c9ef4cce8156b200cf43d0a1 2024-12-11T02:28:38,674 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0686e10c445643268af8172aac9cc613 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0686e10c445643268af8172aac9cc613 2024-12-11T02:28:38,674 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/2a9759fbde8444d4b1ae2670c99b8291 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/2a9759fbde8444d4b1ae2670c99b8291 2024-12-11T02:28:38,674 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/04c47290510e464bafc615b11ccec373 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/04c47290510e464bafc615b11ccec373 2024-12-11T02:28:38,674 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0170efd5633740e082cdf44e8e4518a9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/0170efd5633740e082cdf44e8e4518a9 2024-12-11T02:28:38,674 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7bd9c1b6e064319a5242a1256085ed4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e7bd9c1b6e064319a5242a1256085ed4 2024-12-11T02:28:38,674 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/545ca0f289fb4a238eb14be8d03071c9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/545ca0f289fb4a238eb14be8d03071c9 2024-12-11T02:28:38,675 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bef242684df74f37988999dc4e985f0d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/bef242684df74f37988999dc4e985f0d 2024-12-11T02:28:38,676 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/12781ec311674d3abeb4e60857af0bfa to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/12781ec311674d3abeb4e60857af0bfa 2024-12-11T02:28:38,676 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/170554fc147143af9d846ac1e44e296f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/170554fc147143af9d846ac1e44e296f 2024-12-11T02:28:38,676 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a94bbc19ccc24acf93aefa325532cffa to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a94bbc19ccc24acf93aefa325532cffa 2024-12-11T02:28:38,676 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a38abc5bfdd44fa1af27183a104650b6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/a38abc5bfdd44fa1af27183a104650b6 2024-12-11T02:28:38,676 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/8f277de711164713afbf644b3d846e8f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/8f277de711164713afbf644b3d846e8f 2024-12-11T02:28:38,676 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/63b7b3e21465495eb36d75c9dd28c7e1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/63b7b3e21465495eb36d75c9dd28c7e1 2024-12-11T02:28:38,677 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/9585914e51734012a7d4a76cf49312a1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/9585914e51734012a7d4a76cf49312a1 2024-12-11T02:28:38,677 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/ee22de686aad4639a92a4cfc29245dae to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/ee22de686aad4639a92a4cfc29245dae 2024-12-11T02:28:38,677 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fb488ea934de43a5bddfd4f0aa19224c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/fb488ea934de43a5bddfd4f0aa19224c 2024-12-11T02:28:38,677 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/d4dd9684fb6041c4a4f06746b7b43b02 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/d4dd9684fb6041c4a4f06746b7b43b02 2024-12-11T02:28:38,677 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e3c31c6f28824d80b40c16c0d22be015 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/e3c31c6f28824d80b40c16c0d22be015 2024-12-11T02:28:38,677 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/b240a01ba44a475bb73b0c5fd6d87e2f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/b240a01ba44a475bb73b0c5fd6d87e2f 2024-12-11T02:28:38,677 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/603ebb45fb964cfca0881f401939087e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/603ebb45fb964cfca0881f401939087e 2024-12-11T02:28:38,678 DEBUG [StoreCloser-TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/973d81878d624dc5b14a367879e87759, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/e0d7b064419a4bf6bd2a6f149a360c0d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/d895c101ea1d44e0b691d15e99cc382b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/de77ef1042374c89b32ed77e54af09f6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/22f7a44098ae4102bbddbefffe246328, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2e57ea4525344af2bda339360d149f8a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/80dafb48879b4348abe9b4223814ffc1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/1394bb57a55e4acbb357d861d2eb055c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2b4e644cd74c4f07909d465d185d2c34, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/14b0b4da6e5a4827a3b241efe407a7fc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/92be1f6955bd461aa0f0e4afb5f2398c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f90a0054f2bf41429f7c4bbae4a77652, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bcfa1a6b19a24fb1a9d2c285d28e1964, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bd72226452404169b0ab574eccbd80a3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/6cd4a34133b943e197d5de5c8645fe03, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/c9dede7260b94a13b499d8cf800c6bbc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cb60472ce4b145138f59f66c03411dc4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f9e917c7a96c483b954f57942facee4d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/ead2f317af844393a07df314e1582004, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/32899181628d46dabf6b4d3aa74c04ab, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f5ded79c4aee484ab683420143479ef4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bc101f2787ba48bca8708d2462912ed1, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/84519cac012c4ea6b984006ea4797f75, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/46643e6f589a4de787c69a4abcae7428, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/707fda4bca6d4263be83be0e459aa820, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/4a96c3f05b764b40af160a1a70871840, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cd21596a3ccb455db0abc13d8380d981, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/06fce957b9134b05a7a58dd060994a87, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/a48bdde0a8e744789ba2dc8223d4e84d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/37ae39a3cc0640f5ae0702c1e8e19d97] to archive 2024-12-11T02:28:38,679 DEBUG [StoreCloser-TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-11T02:28:38,681 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/de77ef1042374c89b32ed77e54af09f6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/de77ef1042374c89b32ed77e54af09f6 2024-12-11T02:28:38,681 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/973d81878d624dc5b14a367879e87759 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/973d81878d624dc5b14a367879e87759 2024-12-11T02:28:38,681 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/e0d7b064419a4bf6bd2a6f149a360c0d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/e0d7b064419a4bf6bd2a6f149a360c0d 2024-12-11T02:28:38,681 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/d895c101ea1d44e0b691d15e99cc382b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/d895c101ea1d44e0b691d15e99cc382b 2024-12-11T02:28:38,681 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2e57ea4525344af2bda339360d149f8a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2e57ea4525344af2bda339360d149f8a 2024-12-11T02:28:38,681 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/1394bb57a55e4acbb357d861d2eb055c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/1394bb57a55e4acbb357d861d2eb055c 2024-12-11T02:28:38,681 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/22f7a44098ae4102bbddbefffe246328 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/22f7a44098ae4102bbddbefffe246328 2024-12-11T02:28:38,681 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/80dafb48879b4348abe9b4223814ffc1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/80dafb48879b4348abe9b4223814ffc1 2024-12-11T02:28:38,682 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2b4e644cd74c4f07909d465d185d2c34 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/2b4e644cd74c4f07909d465d185d2c34 2024-12-11T02:28:38,682 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bcfa1a6b19a24fb1a9d2c285d28e1964 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bcfa1a6b19a24fb1a9d2c285d28e1964 2024-12-11T02:28:38,683 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/6cd4a34133b943e197d5de5c8645fe03 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/6cd4a34133b943e197d5de5c8645fe03 2024-12-11T02:28:38,683 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/14b0b4da6e5a4827a3b241efe407a7fc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/14b0b4da6e5a4827a3b241efe407a7fc 2024-12-11T02:28:38,683 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/c9dede7260b94a13b499d8cf800c6bbc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/c9dede7260b94a13b499d8cf800c6bbc 2024-12-11T02:28:38,683 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f90a0054f2bf41429f7c4bbae4a77652 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f90a0054f2bf41429f7c4bbae4a77652 2024-12-11T02:28:38,683 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bd72226452404169b0ab574eccbd80a3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bd72226452404169b0ab574eccbd80a3 2024-12-11T02:28:38,684 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/92be1f6955bd461aa0f0e4afb5f2398c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/92be1f6955bd461aa0f0e4afb5f2398c 2024-12-11T02:28:38,684 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cb60472ce4b145138f59f66c03411dc4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cb60472ce4b145138f59f66c03411dc4 2024-12-11T02:28:38,684 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/ead2f317af844393a07df314e1582004 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/ead2f317af844393a07df314e1582004 2024-12-11T02:28:38,685 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bc101f2787ba48bca8708d2462912ed1 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/bc101f2787ba48bca8708d2462912ed1 2024-12-11T02:28:38,685 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f5ded79c4aee484ab683420143479ef4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f5ded79c4aee484ab683420143479ef4 2024-12-11T02:28:38,685 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/32899181628d46dabf6b4d3aa74c04ab to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/32899181628d46dabf6b4d3aa74c04ab 2024-12-11T02:28:38,685 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/84519cac012c4ea6b984006ea4797f75 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/84519cac012c4ea6b984006ea4797f75 2024-12-11T02:28:38,685 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f9e917c7a96c483b954f57942facee4d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/f9e917c7a96c483b954f57942facee4d 2024-12-11T02:28:38,686 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/46643e6f589a4de787c69a4abcae7428 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/46643e6f589a4de787c69a4abcae7428 2024-12-11T02:28:38,686 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/707fda4bca6d4263be83be0e459aa820 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/707fda4bca6d4263be83be0e459aa820 2024-12-11T02:28:38,686 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/4a96c3f05b764b40af160a1a70871840 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/4a96c3f05b764b40af160a1a70871840 2024-12-11T02:28:38,686 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/06fce957b9134b05a7a58dd060994a87 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/06fce957b9134b05a7a58dd060994a87 2024-12-11T02:28:38,686 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cd21596a3ccb455db0abc13d8380d981 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/cd21596a3ccb455db0abc13d8380d981 2024-12-11T02:28:38,687 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/37ae39a3cc0640f5ae0702c1e8e19d97 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/37ae39a3cc0640f5ae0702c1e8e19d97 2024-12-11T02:28:38,687 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/a48bdde0a8e744789ba2dc8223d4e84d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/a48bdde0a8e744789ba2dc8223d4e84d 2024-12-11T02:28:38,690 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/recovered.edits/451.seqid, newMaxSeqId=451, maxSeqId=1 2024-12-11T02:28:38,690 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517. 2024-12-11T02:28:38,690 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for 513ab21b2f5fe75f43e6defd51fe8517: 2024-12-11T02:28:38,692 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed 513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:38,692 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=513ab21b2f5fe75f43e6defd51fe8517, regionState=CLOSED 2024-12-11T02:28:38,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-11T02:28:38,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure 513ab21b2f5fe75f43e6defd51fe8517, server=5f57a24c5131,40311,1733883964600 in 1.4330 sec 2024-12-11T02:28:38,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-12-11T02:28:38,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=513ab21b2f5fe75f43e6defd51fe8517, UNASSIGN in 1.4360 sec 2024-12-11T02:28:38,696 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-11T02:28:38,696 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4380 sec 2024-12-11T02:28:38,697 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884118697"}]},"ts":"1733884118697"} 2024-12-11T02:28:38,698 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T02:28:38,699 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set 
TestAcidGuarantees to state=DISABLED 2024-12-11T02:28:38,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4500 sec 2024-12-11T02:28:39,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T02:28:39,357 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-12-11T02:28:39,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T02:28:39,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:39,358 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:39,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-11T02:28:39,359 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=154, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:39,360 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:39,361 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/recovered.edits] 2024-12-11T02:28:39,364 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e1bfbe0e75d347eea83e393943a5ff65 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/e1bfbe0e75d347eea83e393943a5ff65 2024-12-11T02:28:39,364 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/5d8952da04a74fdbaf5e543e2a1cb52b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/A/5d8952da04a74fdbaf5e543e2a1cb52b 
2024-12-11T02:28:39,366 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/1c2b75db83db466f9a527efa13d1f693 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/1c2b75db83db466f9a527efa13d1f693 2024-12-11T02:28:39,366 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/589cfc360dd34338b8b82924e3ab6390 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/B/589cfc360dd34338b8b82924e3ab6390 2024-12-11T02:28:39,368 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/375ba1bb14324722ad25ca9310332853 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/375ba1bb14324722ad25ca9310332853 2024-12-11T02:28:39,368 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/fde0f85f7d164e6885d75cda80f8233a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/C/fde0f85f7d164e6885d75cda80f8233a 2024-12-11T02:28:39,370 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/recovered.edits/451.seqid to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517/recovered.edits/451.seqid 2024-12-11T02:28:39,370 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/513ab21b2f5fe75f43e6defd51fe8517 2024-12-11T02:28:39,370 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T02:28:39,372 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=154, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:39,373 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T02:28:39,375 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
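The entries above are the standard teardown between test methods: DisableTableProcedure (pid=150) followed by DeleteTableProcedure (pid=154), which archives the region's HFiles and removes the table from hbase:meta. On the client side this maps to two Admin calls; the following is only a minimal sketch assuming the stock HBase 2.x client API (the class name is made up for illustration, and the test harness's own cleanup helper is not visible in this log).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropTestTable {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A table must be disabled before it can be deleted; each call blocks until
          // the corresponding master procedure (DISABLE / DELETE above) has finished.
          if (admin.tableExists(table)) {
            admin.disableTable(table);
            admin.deleteTable(table);
          }
        }
      }
    }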
2024-12-11T02:28:39,376 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=154, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:39,376 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-11T02:28:39,376 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733884119376"}]},"ts":"9223372036854775807"} 2024-12-11T02:28:39,377 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T02:28:39,377 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 513ab21b2f5fe75f43e6defd51fe8517, NAME => 'TestAcidGuarantees,,1733884086430.513ab21b2f5fe75f43e6defd51fe8517.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T02:28:39,378 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-11T02:28:39,378 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733884119378"}]},"ts":"9223372036854775807"} 2024-12-11T02:28:39,379 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T02:28:39,381 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=154, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:39,381 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 24 msec 2024-12-11T02:28:39,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-11T02:28:39,460 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-12-11T02:28:39,469 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244 (was 243) - Thread LEAK? -, OpenFileDescriptor=447 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=361 (was 375), ProcessCount=11 (was 11), AvailableMemoryMB=4282 (was 4296) 2024-12-11T02:28:39,480 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=361, ProcessCount=11, AvailableMemoryMB=4281 2024-12-11T02:28:39,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-11T02:28:39,481 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:28:39,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:39,482 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T02:28:39,482 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:39,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 155 2024-12-11T02:28:39,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-11T02:28:39,483 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T02:28:39,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742444_1620 (size=963) 2024-12-11T02:28:39,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-11T02:28:39,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-11T02:28:39,889 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6 2024-12-11T02:28:39,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742445_1621 (size=53) 2024-12-11T02:28:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-11T02:28:40,294 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:28:40,294 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ee5747d737c855bb22265bdc2d0c886b, disabling compactions & flushes 2024-12-11T02:28:40,294 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:40,294 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:40,294 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. after waiting 0 ms 2024-12-11T02:28:40,294 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:40,294 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
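The CREATE issued at 02:28:39,481 defines three column families (A, B, C) and sets the table attribute 'hbase.hregion.compacting.memstore.type' to ADAPTIVE, which is why every store opened later in this log reports memstore type=CompactingMemStore with compactor=ADAPTIVE. A minimal sketch of an equivalent createTable call, assuming the standard HBase 2.x descriptor builders; the class name is illustrative and the test's actual setup code is not part of this log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateAdaptiveTable {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata seen in the logged descriptor: adaptive in-memory compaction.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1) // VERSIONS => '1' in the logged schema
              .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until CreateTableProcedure (pid=155 above) has assigned and opened the region.
          admin.createTable(table.build());
        }
      }
    }

The remaining family attributes printed in the log (BLOOMFILTER => 'ROW', BLOCKSIZE => '65536', TTL => 'FOREVER', and so on) appear to be the stock 2.x defaults, so a sketch like this does not need to set them explicitly.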
2024-12-11T02:28:40,295 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:40,295 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T02:28:40,296 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733884120295"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733884120295"}]},"ts":"1733884120295"} 2024-12-11T02:28:40,297 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T02:28:40,297 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T02:28:40,297 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884120297"}]},"ts":"1733884120297"} 2024-12-11T02:28:40,298 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T02:28:40,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, ASSIGN}] 2024-12-11T02:28:40,302 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, ASSIGN 2024-12-11T02:28:40,303 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, ASSIGN; state=OFFLINE, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=false 2024-12-11T02:28:40,453 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=ee5747d737c855bb22265bdc2d0c886b, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:40,454 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; OpenRegionProcedure ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:28:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-11T02:28:40,606 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:40,608 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:40,608 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7285): Opening region: {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:28:40,609 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:40,609 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:28:40,609 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7327): checking encryption for ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:40,609 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7330): checking classloading for ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:40,610 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:40,611 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:28:40,611 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee5747d737c855bb22265bdc2d0c886b columnFamilyName A 2024-12-11T02:28:40,611 DEBUG [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:40,612 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(327): Store=ee5747d737c855bb22265bdc2d0c886b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:28:40,612 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:40,613 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:28:40,613 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee5747d737c855bb22265bdc2d0c886b columnFamilyName B 2024-12-11T02:28:40,613 DEBUG [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:40,613 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(327): Store=ee5747d737c855bb22265bdc2d0c886b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:28:40,613 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:40,614 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:28:40,614 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee5747d737c855bb22265bdc2d0c886b columnFamilyName C 2024-12-11T02:28:40,614 DEBUG [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:40,614 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(327): Store=ee5747d737c855bb22265bdc2d0c886b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:28:40,615 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:40,615 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:40,615 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:40,617 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:28:40,617 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1085): writing seq id for ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:40,619 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T02:28:40,619 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1102): Opened ee5747d737c855bb22265bdc2d0c886b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67811148, jitterRate=0.010464847087860107}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:28:40,620 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1001): Region open journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:40,621 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., pid=157, masterSystemTime=1733884120606 2024-12-11T02:28:40,622 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:40,622 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:40,622 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=ee5747d737c855bb22265bdc2d0c886b, regionState=OPEN, openSeqNum=2, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:40,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-11T02:28:40,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; OpenRegionProcedure ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 in 169 msec 2024-12-11T02:28:40,625 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-11T02:28:40,625 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, ASSIGN in 323 msec 2024-12-11T02:28:40,626 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T02:28:40,626 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884120626"}]},"ts":"1733884120626"} 2024-12-11T02:28:40,626 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T02:28:40,629 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T02:28:40,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-12-11T02:28:41,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-11T02:28:41,587 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-11T02:28:41,588 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-12-11T02:28:41,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:41,594 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:41,595 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49666, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:41,596 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T02:28:41,597 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53734, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T02:28:41,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-11T02:28:41,598 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T02:28:41,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-11T02:28:41,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742446_1622 (size=999) 2024-12-11T02:28:42,008 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-11T02:28:42,008 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-11T02:28:42,010 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T02:28:42,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, REOPEN/MOVE}] 2024-12-11T02:28:42,012 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, REOPEN/MOVE 2024-12-11T02:28:42,012 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=ee5747d737c855bb22265bdc2d0c886b, regionState=CLOSING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,013 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T02:28:42,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:28:42,164 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,165 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,165 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T02:28:42,165 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing ee5747d737c855bb22265bdc2d0c886b, disabling compactions & flushes 2024-12-11T02:28:42,165 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,165 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,165 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. after waiting 0 ms 2024-12-11T02:28:42,165 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
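The MODIFY issued at 02:28:41,598 changes only family 'A': it becomes a MOB family (IS_MOB => 'true') with MOB_THRESHOLD => '4', so values longer than 4 bytes are written to MOB files rather than kept inline. That schema change is what drives the ReopenTableRegionsProcedure: the region is being closed in the entries just above and is reopened in those that follow. A rough sketch of such a modification, again assuming the stock HBase 2.x builders rather than the test's own helper code (class and method names are invented for illustration).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class EnableMobOnFamilyA {
      // Assumes an already-open Admin handle, e.g. ConnectionFactory.createConnection(conf).getAdmin().
      static void enableMob(Admin admin) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(table);
        ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
            .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
            .build();
        // modifyTable kicks off the ModifyTableProcedure / ReopenTableRegionsProcedure
        // chain recorded in this part of the log.
        admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(mobA)
            .build());
      }
    }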
2024-12-11T02:28:42,169 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-11T02:28:42,169 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,169 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:42,169 WARN [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionServer(3786): Not adding moved region record: ee5747d737c855bb22265bdc2d0c886b to self. 2024-12-11T02:28:42,171 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,171 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=ee5747d737c855bb22265bdc2d0c886b, regionState=CLOSED 2024-12-11T02:28:42,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-11T02:28:42,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 in 159 msec 2024-12-11T02:28:42,173 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, REOPEN/MOVE; state=CLOSED, location=5f57a24c5131,40311,1733883964600; forceNewPlan=false, retain=true 2024-12-11T02:28:42,324 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=ee5747d737c855bb22265bdc2d0c886b, regionState=OPENING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=160, state=RUNNABLE; OpenRegionProcedure ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:28:42,476 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,479 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:42,479 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7285): Opening region: {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} 2024-12-11T02:28:42,479 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,479 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T02:28:42,479 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7327): checking encryption for ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,479 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7330): checking classloading for ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,480 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,481 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:28:42,481 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee5747d737c855bb22265bdc2d0c886b columnFamilyName A 2024-12-11T02:28:42,482 DEBUG [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:42,482 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(327): Store=ee5747d737c855bb22265bdc2d0c886b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:28:42,483 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,483 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:28:42,483 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee5747d737c855bb22265bdc2d0c886b columnFamilyName B 2024-12-11T02:28:42,483 DEBUG [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:42,484 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(327): Store=ee5747d737c855bb22265bdc2d0c886b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:28:42,484 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,484 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T02:28:42,484 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee5747d737c855bb22265bdc2d0c886b columnFamilyName C 2024-12-11T02:28:42,484 DEBUG [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:42,485 INFO [StoreOpener-ee5747d737c855bb22265bdc2d0c886b-1 {}] regionserver.HStore(327): Store=ee5747d737c855bb22265bdc2d0c886b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T02:28:42,485 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,485 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,486 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,487 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T02:28:42,488 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1085): writing seq id for ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,488 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1102): Opened ee5747d737c855bb22265bdc2d0c886b; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66681045, jitterRate=-0.0063749998807907104}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T02:28:42,489 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1001): Region open journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:42,490 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., pid=162, masterSystemTime=1733884122476 2024-12-11T02:28:42,491 DEBUG [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,491 INFO [RS_OPEN_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:42,491 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=ee5747d737c855bb22265bdc2d0c886b, regionState=OPEN, openSeqNum=5, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,493 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=160 2024-12-11T02:28:42,493 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=160, state=SUCCESS; OpenRegionProcedure ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 in 167 msec 2024-12-11T02:28:42,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-11T02:28:42,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, REOPEN/MOVE in 482 msec 2024-12-11T02:28:42,496 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-11T02:28:42,496 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 485 msec 2024-12-11T02:28:42,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 897 msec 2024-12-11T02:28:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-11T02:28:42,498 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-12-11T02:28:42,501 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,502 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d7fe93b to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7846cb78 2024-12-11T02:28:42,505 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@150e08ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,506 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11c440f7 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f1754bc 2024-12-11T02:28:42,508 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a3b66d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,509 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x58460ef3 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d9113f3 2024-12-11T02:28:42,511 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfdf76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,512 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e8cd1ae to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bb75907 2024-12-11T02:28:42,514 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c2838a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,514 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c1d3a95 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50bf224f 2024-12-11T02:28:42,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@410bf0c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,517 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x503a7d2e to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79be903c 2024-12-11T02:28:42,520 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67adb273, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,520 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x404bb685 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d79f1c0 2024-12-11T02:28:42,523 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@474dec36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,523 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42aacb30 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40dfd554 2024-12-11T02:28:42,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68dbad25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,526 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66e06176 to 127.0.0.1:63149 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@582b6d8b 2024-12-11T02:28:42,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d2c412e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T02:28:42,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-11T02:28:42,532 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:42,532 DEBUG [hconnection-0x654cc384-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-11T02:28:42,533 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:42,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:42,533 DEBUG [hconnection-0x6280adf4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,534 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49682, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,534 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49680, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,534 DEBUG [hconnection-0x2676c309-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,535 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49698, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,536 DEBUG [hconnection-0x2c7a25a2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,537 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49704, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:42,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:28:42,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:42,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:42,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:42,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:42,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:42,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:42,556 DEBUG [hconnection-0x6bbc4b40-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,557 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49712, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,568 DEBUG [hconnection-0x567f447f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,570 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884182570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884182570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884182571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,576 DEBUG [hconnection-0x2bfa82b3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,576 DEBUG [hconnection-0x5c67d8c0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,577 DEBUG [hconnection-0x549e590e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,577 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,577 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49730, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,578 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49750, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,578 DEBUG [hconnection-0x4693f4d3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T02:28:42,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,580 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49760, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T02:28:42,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884182579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884182582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121154a67ff19c9e4d2cadbc989a2d763e60_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884122541/Put/seqid=0 2024-12-11T02:28:42,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742447_1623 (size=12154) 2024-12-11T02:28:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-11T02:28:42,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884182672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884182672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884182674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884182682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,684 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:42,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:42,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:42,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:42,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884182685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-11T02:28:42,838 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:42,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:42,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,839 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:42,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:42,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884182876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884182876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884182876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884182884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884182886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,992 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:42,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:42,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:42,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:42,993 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:42,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:42,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,022 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:43,026 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121154a67ff19c9e4d2cadbc989a2d763e60_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121154a67ff19c9e4d2cadbc989a2d763e60_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:43,026 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/f25b269b51c349f4b1c70f24090f01e8, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:43,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/f25b269b51c349f4b1c70f24090f01e8 is 175, key is test_row_0/A:col10/1733884122541/Put/seqid=0 2024-12-11T02:28:43,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742448_1624 (size=30955) 2024-12-11T02:28:43,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-11T02:28:43,145 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:43,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:43,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,146 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:43,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884183179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884183179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884183180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884183188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884183188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,298 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:43,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:43,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,298 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,432 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/f25b269b51c349f4b1c70f24090f01e8 2024-12-11T02:28:43,451 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:43,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:43,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:43,451 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/c53965a06dc14172a526f70e675f93cc is 50, key is test_row_0/B:col10/1733884122541/Put/seqid=0 2024-12-11T02:28:43,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742449_1625 (size=12001) 2024-12-11T02:28:43,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/c53965a06dc14172a526f70e675f93cc 2024-12-11T02:28:43,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/8931b62d6f324c72bb56a5c0b05df3af is 50, key is test_row_0/C:col10/1733884122541/Put/seqid=0 2024-12-11T02:28:43,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742450_1626 (size=12001) 2024-12-11T02:28:43,603 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:43,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:43,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:43,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-11T02:28:43,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884183685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884183685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884183686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884183695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:43,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884183696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,756 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:43,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:43,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/8931b62d6f324c72bb56a5c0b05df3af 2024-12-11T02:28:43,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/f25b269b51c349f4b1c70f24090f01e8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/f25b269b51c349f4b1c70f24090f01e8 2024-12-11T02:28:43,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/f25b269b51c349f4b1c70f24090f01e8, entries=150, sequenceid=15, filesize=30.2 K 2024-12-11T02:28:43,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/c53965a06dc14172a526f70e675f93cc as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/c53965a06dc14172a526f70e675f93cc 2024-12-11T02:28:43,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/c53965a06dc14172a526f70e675f93cc, entries=150, sequenceid=15, 
filesize=11.7 K 2024-12-11T02:28:43,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/8931b62d6f324c72bb56a5c0b05df3af as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8931b62d6f324c72bb56a5c0b05df3af 2024-12-11T02:28:43,909 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:43,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:43,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:43,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:43,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:43,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:43,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8931b62d6f324c72bb56a5c0b05df3af, entries=150, sequenceid=15, filesize=11.7 K 2024-12-11T02:28:43,912 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ee5747d737c855bb22265bdc2d0c886b in 1370ms, sequenceid=15, compaction requested=false 2024-12-11T02:28:43,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:44,062 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-11T02:28:44,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:44,063 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:28:44,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:44,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:44,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:44,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:44,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:44,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:44,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118807ee7c9b144f288bba79336c113e1b_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884122568/Put/seqid=0 2024-12-11T02:28:44,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742451_1627 (size=12154) 2024-12-11T02:28:44,224 
WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T02:28:44,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:44,481 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118807ee7c9b144f288bba79336c113e1b_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118807ee7c9b144f288bba79336c113e1b_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:44,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/70c23f65b7254c7abbc50cbd1a50f600, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:44,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/70c23f65b7254c7abbc50cbd1a50f600 is 175, key is test_row_0/A:col10/1733884122568/Put/seqid=0 2024-12-11T02:28:44,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742452_1628 (size=30955) 2024-12-11T02:28:44,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-11T02:28:44,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:44,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:44,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:44,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884184701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884184702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884184702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884184704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:44,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884184704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:44,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884184805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:44,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884184805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:44,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884184805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:44,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884184805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:44,888 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/70c23f65b7254c7abbc50cbd1a50f600 2024-12-11T02:28:44,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/90dd7cc43c2545d3ab48bcaa3c0bb5b2 is 50, key is test_row_0/B:col10/1733884122568/Put/seqid=0 2024-12-11T02:28:44,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742453_1629 (size=12001) 2024-12-11T02:28:44,913 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/90dd7cc43c2545d3ab48bcaa3c0bb5b2 2024-12-11T02:28:44,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/3e76601c00d044ba96705095ee589c9a is 50, key is test_row_0/C:col10/1733884122568/Put/seqid=0 2024-12-11T02:28:44,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742454_1630 (size=12001) 2024-12-11T02:28:45,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884185008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884185008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884185008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884185008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884185310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884185310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884185311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884185311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,325 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/3e76601c00d044ba96705095ee589c9a 2024-12-11T02:28:45,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/70c23f65b7254c7abbc50cbd1a50f600 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c23f65b7254c7abbc50cbd1a50f600 2024-12-11T02:28:45,333 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c23f65b7254c7abbc50cbd1a50f600, entries=150, sequenceid=40, filesize=30.2 K 2024-12-11T02:28:45,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/90dd7cc43c2545d3ab48bcaa3c0bb5b2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/90dd7cc43c2545d3ab48bcaa3c0bb5b2 2024-12-11T02:28:45,338 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/90dd7cc43c2545d3ab48bcaa3c0bb5b2, entries=150, sequenceid=40, filesize=11.7 K 2024-12-11T02:28:45,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/3e76601c00d044ba96705095ee589c9a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3e76601c00d044ba96705095ee589c9a 2024-12-11T02:28:45,342 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3e76601c00d044ba96705095ee589c9a, entries=150, sequenceid=40, filesize=11.7 K 2024-12-11T02:28:45,342 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ee5747d737c855bb22265bdc2d0c886b in 1279ms, sequenceid=40, compaction requested=false 2024-12-11T02:28:45,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:45,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:45,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-11T02:28:45,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-11T02:28:45,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-11T02:28:45,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8100 sec 2024-12-11T02:28:45,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.8150 sec 2024-12-11T02:28:45,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:45,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T02:28:45,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:45,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:45,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:45,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:45,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:45,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:45,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211925863aa6c27465bbced81c6d0ab6f46_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884124698/Put/seqid=0 2024-12-11T02:28:45,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742455_1631 (size=12154) 2024-12-11T02:28:45,826 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:45,829 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211925863aa6c27465bbced81c6d0ab6f46_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211925863aa6c27465bbced81c6d0ab6f46_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:45,830 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4e00f821097d4f2784a21b5c701a2066, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:45,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4e00f821097d4f2784a21b5c701a2066 is 175, key is test_row_0/A:col10/1733884124698/Put/seqid=0 2024-12-11T02:28:45,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742456_1632 (size=30955) 2024-12-11T02:28:45,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884185862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884185862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884185863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884185865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884185965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884185966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884185968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:45,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:45,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884185968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884186169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884186169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884186170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884186172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,234 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4e00f821097d4f2784a21b5c701a2066 2024-12-11T02:28:46,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/4a48e9afb45c41cca15868ece8212ff3 is 50, key is test_row_0/B:col10/1733884124698/Put/seqid=0 2024-12-11T02:28:46,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742457_1633 (size=12001) 2024-12-11T02:28:46,244 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/4a48e9afb45c41cca15868ece8212ff3 2024-12-11T02:28:46,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/66d0d4f9514344f3a9fa29580b8bbead is 50, key is test_row_0/C:col10/1733884124698/Put/seqid=0 2024-12-11T02:28:46,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742458_1634 (size=12001) 2024-12-11T02:28:46,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884186471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884186474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884186475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884186475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-11T02:28:46,637 INFO [Thread-2739 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-11T02:28:46,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-11T02:28:46,641 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T02:28:46,642 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:46,642 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:46,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/66d0d4f9514344f3a9fa29580b8bbead 2024-12-11T02:28:46,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4e00f821097d4f2784a21b5c701a2066 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4e00f821097d4f2784a21b5c701a2066 2024-12-11T02:28:46,663 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4e00f821097d4f2784a21b5c701a2066, entries=150, sequenceid=53, filesize=30.2 K 2024-12-11T02:28:46,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/4a48e9afb45c41cca15868ece8212ff3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/4a48e9afb45c41cca15868ece8212ff3 2024-12-11T02:28:46,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/4a48e9afb45c41cca15868ece8212ff3, entries=150, sequenceid=53, filesize=11.7 K 2024-12-11T02:28:46,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/66d0d4f9514344f3a9fa29580b8bbead as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/66d0d4f9514344f3a9fa29580b8bbead 2024-12-11T02:28:46,672 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/66d0d4f9514344f3a9fa29580b8bbead, entries=150, sequenceid=53, filesize=11.7 K 2024-12-11T02:28:46,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ee5747d737c855bb22265bdc2d0c886b in 857ms, sequenceid=53, compaction requested=true 2024-12-11T02:28:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:46,673 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:C, priority=-2147483648, current under compaction store 
size is 3 2024-12-11T02:28:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:46,673 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:46,674 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:46,674 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/B is initiating minor compaction (all files) 2024-12-11T02:28:46,674 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/B in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:46,675 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/c53965a06dc14172a526f70e675f93cc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/90dd7cc43c2545d3ab48bcaa3c0bb5b2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/4a48e9afb45c41cca15868ece8212ff3] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=35.2 K 2024-12-11T02:28:46,675 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:46,675 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/A is initiating minor compaction (all files) 2024-12-11T02:28:46,675 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/A in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:46,675 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/f25b269b51c349f4b1c70f24090f01e8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c23f65b7254c7abbc50cbd1a50f600, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4e00f821097d4f2784a21b5c701a2066] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=90.7 K 2024-12-11T02:28:46,675 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:46,675 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/f25b269b51c349f4b1c70f24090f01e8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c23f65b7254c7abbc50cbd1a50f600, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4e00f821097d4f2784a21b5c701a2066] 2024-12-11T02:28:46,675 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting c53965a06dc14172a526f70e675f93cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733884122540 2024-12-11T02:28:46,675 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting f25b269b51c349f4b1c70f24090f01e8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733884122540 2024-12-11T02:28:46,676 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70c23f65b7254c7abbc50cbd1a50f600, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733884122564 2024-12-11T02:28:46,676 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 90dd7cc43c2545d3ab48bcaa3c0bb5b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733884122564 2024-12-11T02:28:46,676 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e00f821097d4f2784a21b5c701a2066, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733884124698 2024-12-11T02:28:46,676 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a48e9afb45c41cca15868ece8212ff3, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733884124698 2024-12-11T02:28:46,682 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:46,686 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#B#compaction#544 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:46,686 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/dffdb6374cff4c6cb55eb29f9edb3a39 is 50, key is test_row_0/B:col10/1733884124698/Put/seqid=0 2024-12-11T02:28:46,688 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121175f936a1719241ae837fc9bc737c4941_ee5747d737c855bb22265bdc2d0c886b store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:46,689 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121175f936a1719241ae837fc9bc737c4941_ee5747d737c855bb22265bdc2d0c886b, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:46,689 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121175f936a1719241ae837fc9bc737c4941_ee5747d737c855bb22265bdc2d0c886b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:46,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742459_1635 (size=12104) 2024-12-11T02:28:46,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:46,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:28:46,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:46,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:46,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:46,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:46,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:46,723 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:46,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T02:28:46,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742460_1636 (size=4469) 2024-12-11T02:28:46,749 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#A#compaction#543 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:46,749 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/58a77e271d6b434ab2fb8b158bcfa878 is 175, key is test_row_0/A:col10/1733884124698/Put/seqid=0 2024-12-11T02:28:46,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211cf7a075897b44fd19157639effc346b9_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884125857/Put/seqid=0 2024-12-11T02:28:46,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742461_1637 (size=31058) 2024-12-11T02:28:46,763 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/58a77e271d6b434ab2fb8b158bcfa878 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/58a77e271d6b434ab2fb8b158bcfa878 2024-12-11T02:28:46,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742462_1638 (size=12154) 2024-12-11T02:28:46,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,767 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/A of ee5747d737c855bb22265bdc2d0c886b into 58a77e271d6b434ab2fb8b158bcfa878(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884186764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,767 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:46,767 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/A, priority=13, startTime=1733884126673; duration=0sec 2024-12-11T02:28:46,767 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:46,768 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:A 2024-12-11T02:28:46,768 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:46,768 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:46,769 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:46,769 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/C is initiating minor compaction (all files) 2024-12-11T02:28:46,769 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/C in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:46,769 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8931b62d6f324c72bb56a5c0b05df3af, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3e76601c00d044ba96705095ee589c9a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/66d0d4f9514344f3a9fa29580b8bbead] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=35.2 K 2024-12-11T02:28:46,769 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8931b62d6f324c72bb56a5c0b05df3af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733884122540 2024-12-11T02:28:46,770 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e76601c00d044ba96705095ee589c9a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733884122564 2024-12-11T02:28:46,770 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66d0d4f9514344f3a9fa29580b8bbead, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733884124698 2024-12-11T02:28:46,771 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211cf7a075897b44fd19157639effc346b9_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211cf7a075897b44fd19157639effc346b9_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:46,772 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/e5cc64256f704734b5c819dcba9bdd01, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:46,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/e5cc64256f704734b5c819dcba9bdd01 is 175, key is test_row_0/A:col10/1733884125857/Put/seqid=0 2024-12-11T02:28:46,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742463_1639 (size=30955) 2024-12-11T02:28:46,776 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/e5cc64256f704734b5c819dcba9bdd01 2024-12-11T02:28:46,782 INFO 
[RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#C#compaction#546 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:46,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/e1495e1233644834bc6e8682875231d4 is 50, key is test_row_0/B:col10/1733884125857/Put/seqid=0 2024-12-11T02:28:46,782 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/d1dc45d8bc48451fb4c930ef5cd7034a is 50, key is test_row_0/C:col10/1733884124698/Put/seqid=0 2024-12-11T02:28:46,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742464_1640 (size=12104) 2024-12-11T02:28:46,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-11T02:28:46,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:46,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:46,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:46,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:46,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:46,801 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/d1dc45d8bc48451fb4c930ef5cd7034a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d1dc45d8bc48451fb4c930ef5cd7034a 2024-12-11T02:28:46,806 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/C of ee5747d737c855bb22265bdc2d0c886b into d1dc45d8bc48451fb4c930ef5cd7034a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:46,806 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:46,806 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/C, priority=13, startTime=1733884126673; duration=0sec 2024-12-11T02:28:46,806 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:46,806 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:C 2024-12-11T02:28:46,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742465_1641 (size=12001) 2024-12-11T02:28:46,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/e1495e1233644834bc6e8682875231d4 2024-12-11T02:28:46,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/d0dc989ec73445c0a180e587aaad5537 is 50, key is test_row_0/C:col10/1733884125857/Put/seqid=0 2024-12-11T02:28:46,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742466_1642 (size=12001) 2024-12-11T02:28:46,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884186868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T02:28:46,947 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-11T02:28:46,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:46,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:46,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:46,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:46,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:46,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884186974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884186977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884186978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:46,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884186981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:47,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884187071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,098 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/dffdb6374cff4c6cb55eb29f9edb3a39 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/dffdb6374cff4c6cb55eb29f9edb3a39 2024-12-11T02:28:47,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-11T02:28:47,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:47,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:47,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:47,100 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:47,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:47,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:47,103 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/B of ee5747d737c855bb22265bdc2d0c886b into dffdb6374cff4c6cb55eb29f9edb3a39(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:47,103 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:47,103 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/B, priority=13, startTime=1733884126673; duration=0sec 2024-12-11T02:28:47,103 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:47,103 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:B 2024-12-11T02:28:47,226 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/d0dc989ec73445c0a180e587aaad5537 2024-12-11T02:28:47,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/e5cc64256f704734b5c819dcba9bdd01 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/e5cc64256f704734b5c819dcba9bdd01 2024-12-11T02:28:47,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/e5cc64256f704734b5c819dcba9bdd01, entries=150, sequenceid=77, filesize=30.2 K 2024-12-11T02:28:47,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/e1495e1233644834bc6e8682875231d4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/e1495e1233644834bc6e8682875231d4 2024-12-11T02:28:47,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/e1495e1233644834bc6e8682875231d4, entries=150, sequenceid=77, filesize=11.7 K 2024-12-11T02:28:47,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/d0dc989ec73445c0a180e587aaad5537 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d0dc989ec73445c0a180e587aaad5537 2024-12-11T02:28:47,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d0dc989ec73445c0a180e587aaad5537, entries=150, sequenceid=77, filesize=11.7 K 2024-12-11T02:28:47,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ee5747d737c855bb22265bdc2d0c886b in 520ms, sequenceid=77, compaction requested=false 2024-12-11T02:28:47,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:47,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T02:28:47,252 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-11T02:28:47,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:47,253 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:28:47,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:47,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:47,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:47,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:47,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:47,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:47,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121138a84c7994b24b9b832eb2b547defcfd_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884126763/Put/seqid=0 2024-12-11T02:28:47,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is 
added to blk_1073742467_1643 (size=12154) 2024-12-11T02:28:47,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:47,268 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121138a84c7994b24b9b832eb2b547defcfd_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121138a84c7994b24b9b832eb2b547defcfd_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:47,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4ee20f82cd6b4844a2ce06adc76d0f71, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:47,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4ee20f82cd6b4844a2ce06adc76d0f71 is 175, key is test_row_0/A:col10/1733884126763/Put/seqid=0 2024-12-11T02:28:47,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742468_1644 (size=30955) 2024-12-11T02:28:47,275 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4ee20f82cd6b4844a2ce06adc76d0f71 2024-12-11T02:28:47,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/a5bc5a212d6348888244b000743a1359 is 50, key is test_row_0/B:col10/1733884126763/Put/seqid=0 2024-12-11T02:28:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742469_1645 (size=12001) 2024-12-11T02:28:47,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:47,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
as already flushing 2024-12-11T02:28:47,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:47,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884187431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:47,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884187535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,687 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/a5bc5a212d6348888244b000743a1359 2024-12-11T02:28:47,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/232473155fb648f093c2bca235bbb290 is 50, key is test_row_0/C:col10/1733884126763/Put/seqid=0 2024-12-11T02:28:47,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742470_1646 (size=12001) 2024-12-11T02:28:47,698 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/232473155fb648f093c2bca235bbb290 2024-12-11T02:28:47,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4ee20f82cd6b4844a2ce06adc76d0f71 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4ee20f82cd6b4844a2ce06adc76d0f71 2024-12-11T02:28:47,705 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4ee20f82cd6b4844a2ce06adc76d0f71, entries=150, sequenceid=92, filesize=30.2 K 2024-12-11T02:28:47,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/a5bc5a212d6348888244b000743a1359 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a5bc5a212d6348888244b000743a1359 2024-12-11T02:28:47,709 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a5bc5a212d6348888244b000743a1359, entries=150, sequenceid=92, filesize=11.7 K 2024-12-11T02:28:47,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/232473155fb648f093c2bca235bbb290 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/232473155fb648f093c2bca235bbb290 2024-12-11T02:28:47,718 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/232473155fb648f093c2bca235bbb290, entries=150, sequenceid=92, filesize=11.7 K 2024-12-11T02:28:47,718 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ee5747d737c855bb22265bdc2d0c886b in 465ms, sequenceid=92, compaction requested=true 2024-12-11T02:28:47,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:47,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:47,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-11T02:28:47,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-11T02:28:47,721 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-11T02:28:47,721 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0780 sec 2024-12-11T02:28:47,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.0820 sec 2024-12-11T02:28:47,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:47,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T02:28:47,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:47,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:47,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:47,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:47,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:47,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:47,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T02:28:47,745 INFO [Thread-2739 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-11T02:28:47,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:47,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412116659b8003c644a8aa963ef84c6e182b0_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884127423/Put/seqid=0 2024-12-11T02:28:47,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-11T02:28:47,750 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:47,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-11T02:28:47,751 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:47,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:47,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742471_1647 (size=12154) 2024-12-11T02:28:47,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:47,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884187766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-11T02:28:47,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:47,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884187869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,903 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:47,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:47,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:47,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:47,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:47,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:47,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:47,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:47,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884187981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:47,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884187984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:47,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884187987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:47,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:47,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884187987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-11T02:28:48,055 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:48,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:48,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,056 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:48,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884188071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,154 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:48,157 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412116659b8003c644a8aa963ef84c6e182b0_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116659b8003c644a8aa963ef84c6e182b0_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:48,158 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/26d7bd1418ce4acaa5edbb6534f0e6de, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:48,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/26d7bd1418ce4acaa5edbb6534f0e6de is 175, key is test_row_0/A:col10/1733884127423/Put/seqid=0 2024-12-11T02:28:48,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742472_1648 (size=30955) 2024-12-11T02:28:48,208 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:48,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:48,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,209 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
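Editor's note: the MemStoreFlusher entries just above show the MOB flush path for family A — the flushed MOB file is written under mobdir/.tmp and then renamed into the MOB data directory, while the regular store file for A lands in the region's .tmp directory. For reference only, here is a minimal, hypothetical sketch of how a MOB-enabled family such as 'A' could be declared with the public HBase 2.x client API; the actual TestAcidGuarantees table setup is not visible in this log, so the threshold value and the plain B/C families are assumptions.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Hypothetical: a MOB-enabled family 'A', mirroring the mobdir/... flush path seen above.
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                      .setMobEnabled(true)     // cells above the threshold are written as MOB files
                      .setMobThreshold(100L)   // assumed threshold in bytes; not taken from this log
                      .build())
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(table.build());
        }
      }
    }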
2024-12-11T02:28:48,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-11T02:28:48,361 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:48,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:48,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:48,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884188375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:48,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:48,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,563 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/26d7bd1418ce4acaa5edbb6534f0e6de 2024-12-11T02:28:48,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/951ae604bfcf4ef7b6ce4648e5a5e37a is 50, key is test_row_0/B:col10/1733884127423/Put/seqid=0 2024-12-11T02:28:48,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742473_1649 (size=12001) 2024-12-11T02:28:48,666 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:48,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
as already flushing 2024-12-11T02:28:48,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,819 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:48,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:48,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-11T02:28:48,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:48,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884188877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,972 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:48,973 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:48,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:48,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:48,973 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
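Editor's note: every RegionTooBusyException above reports the same blocking threshold, "Over memstore limit=512.0 K". In HBase the per-region blocking limit is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so a 512 KB limit is consistent with a deliberately small flush size (for example 128 KB with the default multiplier of 4) used to force frequent flushes in this test. The exact values configured by TestAcidGuarantees are not visible in this log, so the numbers in this sketch are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-style settings; not read from this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // flush at 128 KB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // default multiplier

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;                      // 128 KB * 4 = 512 KB

        System.out.println("Writes are rejected with RegionTooBusyException once the region's "
            + "memstore exceeds " + blockingLimit + " bytes");
      }
    }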
2024-12-11T02:28:48,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:48,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/951ae604bfcf4ef7b6ce4648e5a5e37a 2024-12-11T02:28:48,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/86cf1151002b4f0a8672214131b8bc62 is 50, key is test_row_0/C:col10/1733884127423/Put/seqid=0 2024-12-11T02:28:48,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742474_1650 (size=12001) 2024-12-11T02:28:49,125 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:49,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:49,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:49,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:49,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:49,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
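Editor's note: while the region stays over its blocking limit, each Mutate call above is rejected with RegionTooBusyException until the in-progress flush (the B-family file flushed at sequenceid=117 just above) frees memstore space. Below is a minimal, hypothetical client-side sketch of riding out that condition with a bounded backoff; the table, row, and column names reuse the ones from this test, and the retry policy is an assumption rather than anything configured here (the HBase client also performs its own internal retries governed by hbase.client.retries.number and hbase.client.pause).

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoffSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long pauseMs = 100;
          for (int attempt = 1; attempt <= 10; attempt++) {
            try {
              table.put(put);
              break;                      // write accepted
            } catch (IOException e) {
              // The client may surface RegionTooBusyException directly or wrapped once its own
              // retries are exhausted; treat either case as "back off and try again" (assumed handling).
              if (!(e instanceof RegionTooBusyException)
                  && !(e.getCause() instanceof RegionTooBusyException)) {
                throw e;                  // anything else is not the over-memstore condition
              }
              Thread.sleep(pauseMs);
              pauseMs = Math.min(pauseMs * 2, 5_000);  // capped exponential backoff (assumed policy)
            }
          }
        }
      }
    }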
2024-12-11T02:28:49,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:49,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:49,278 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:49,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:49,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:49,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:49,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:49,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:49,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:49,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:49,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/86cf1151002b4f0a8672214131b8bc62 2024-12-11T02:28:49,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/26d7bd1418ce4acaa5edbb6534f0e6de as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/26d7bd1418ce4acaa5edbb6534f0e6de 2024-12-11T02:28:49,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/26d7bd1418ce4acaa5edbb6534f0e6de, entries=150, sequenceid=117, filesize=30.2 K 2024-12-11T02:28:49,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/951ae604bfcf4ef7b6ce4648e5a5e37a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/951ae604bfcf4ef7b6ce4648e5a5e37a 2024-12-11T02:28:49,403 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/951ae604bfcf4ef7b6ce4648e5a5e37a, entries=150, sequenceid=117, filesize=11.7 K 2024-12-11T02:28:49,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/86cf1151002b4f0a8672214131b8bc62 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/86cf1151002b4f0a8672214131b8bc62 2024-12-11T02:28:49,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/86cf1151002b4f0a8672214131b8bc62, entries=150, sequenceid=117, filesize=11.7 K 2024-12-11T02:28:49,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ee5747d737c855bb22265bdc2d0c886b in 1666ms, sequenceid=117, compaction requested=true 2024-12-11T02:28:49,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:49,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:49,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:49,408 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:28:49,409 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:28:49,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:49,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:49,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:49,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:49,410 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123923 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:28:49,410 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/A is initiating minor compaction (all files) 2024-12-11T02:28:49,410 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/A in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:49,410 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/58a77e271d6b434ab2fb8b158bcfa878, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/e5cc64256f704734b5c819dcba9bdd01, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4ee20f82cd6b4844a2ce06adc76d0f71, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/26d7bd1418ce4acaa5edbb6534f0e6de] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=121.0 K 2024-12-11T02:28:49,410 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:49,410 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/58a77e271d6b434ab2fb8b158bcfa878, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/e5cc64256f704734b5c819dcba9bdd01, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4ee20f82cd6b4844a2ce06adc76d0f71, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/26d7bd1418ce4acaa5edbb6534f0e6de] 2024-12-11T02:28:49,411 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:28:49,411 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/B is initiating minor compaction (all files) 2024-12-11T02:28:49,411 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/B in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:49,411 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/dffdb6374cff4c6cb55eb29f9edb3a39, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/e1495e1233644834bc6e8682875231d4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a5bc5a212d6348888244b000743a1359, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/951ae604bfcf4ef7b6ce4648e5a5e37a] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=47.0 K 2024-12-11T02:28:49,411 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58a77e271d6b434ab2fb8b158bcfa878, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733884124698 2024-12-11T02:28:49,411 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting dffdb6374cff4c6cb55eb29f9edb3a39, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733884124698 2024-12-11T02:28:49,411 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5cc64256f704734b5c819dcba9bdd01, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733884125857 2024-12-11T02:28:49,412 DEBUG 
[RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e1495e1233644834bc6e8682875231d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733884125857 2024-12-11T02:28:49,412 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ee20f82cd6b4844a2ce06adc76d0f71, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733884126728 2024-12-11T02:28:49,412 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a5bc5a212d6348888244b000743a1359, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733884126728 2024-12-11T02:28:49,413 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 951ae604bfcf4ef7b6ce4648e5a5e37a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733884127423 2024-12-11T02:28:49,413 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26d7bd1418ce4acaa5edbb6534f0e6de, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733884127423 2024-12-11T02:28:49,430 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:49,430 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:49,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-11T02:28:49,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:49,431 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:28:49,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:49,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:49,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:49,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:49,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:49,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:49,432 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#B#compaction#556 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:49,432 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412115b9da8a7cd044a3ab1cec86bd48d9e20_ee5747d737c855bb22265bdc2d0c886b store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:49,433 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/ee5373a2f3174960a4c14682f29189a3 is 50, key is test_row_0/B:col10/1733884127423/Put/seqid=0 2024-12-11T02:28:49,435 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412115b9da8a7cd044a3ab1cec86bd48d9e20_ee5747d737c855bb22265bdc2d0c886b, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:49,435 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115b9da8a7cd044a3ab1cec86bd48d9e20_ee5747d737c855bb22265bdc2d0c886b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:49,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121100ae9dd17ad04716ae9d9785875de94e_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884127762/Put/seqid=0 2024-12-11T02:28:49,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742475_1651 (size=12241) 2024-12-11T02:28:49,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742476_1652 (size=4469) 2024-12-11T02:28:49,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742477_1653 (size=12154) 2024-12-11T02:28:49,459 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#A#compaction#555 average throughput is 0.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:49,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,459 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/ae62dfb5ad6f417e8c509deec3523a06 is 175, key is test_row_0/A:col10/1733884127423/Put/seqid=0 2024-12-11T02:28:49,463 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121100ae9dd17ad04716ae9d9785875de94e_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121100ae9dd17ad04716ae9d9785875de94e_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:49,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/ec640ca24b67408991017bd50a5b5a73, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:49,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/ec640ca24b67408991017bd50a5b5a73 is 175, key is test_row_0/A:col10/1733884127762/Put/seqid=0 2024-12-11T02:28:49,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742478_1654 (size=31195) 2024-12-11T02:28:49,471 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/ae62dfb5ad6f417e8c509deec3523a06 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ae62dfb5ad6f417e8c509deec3523a06 2024-12-11T02:28:49,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742479_1655 (size=30955) 2024-12-11T02:28:49,475 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=128, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/ec640ca24b67408991017bd50a5b5a73 2024-12-11T02:28:49,477 INFO 
[RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/A of ee5747d737c855bb22265bdc2d0c886b into ae62dfb5ad6f417e8c509deec3523a06(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:49,477 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:49,477 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/A, priority=12, startTime=1733884129408; duration=0sec 2024-12-11T02:28:49,477 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:49,477 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:A 2024-12-11T02:28:49,478 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T02:28:49,479 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T02:28:49,480 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/C is initiating minor compaction (all files) 2024-12-11T02:28:49,480 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/C in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:49,480 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d1dc45d8bc48451fb4c930ef5cd7034a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d0dc989ec73445c0a180e587aaad5537, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/232473155fb648f093c2bca235bbb290, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/86cf1151002b4f0a8672214131b8bc62] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=47.0 K 2024-12-11T02:28:49,480 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1dc45d8bc48451fb4c930ef5cd7034a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733884124698 2024-12-11T02:28:49,480 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0dc989ec73445c0a180e587aaad5537, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733884125857 2024-12-11T02:28:49,481 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 232473155fb648f093c2bca235bbb290, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733884126728 2024-12-11T02:28:49,482 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86cf1151002b4f0a8672214131b8bc62, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733884127423 2024-12-11T02:28:49,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/5c7dd4c9d6d54567bc0aab9032184968 is 50, key is test_row_0/B:col10/1733884127762/Put/seqid=0 2024-12-11T02:28:49,505 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#C#compaction#559 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:49,505 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/c4804f6b13a549739f704ca20e0c034c is 50, key is test_row_0/C:col10/1733884127423/Put/seqid=0 2024-12-11T02:28:49,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742480_1656 (size=12001) 2024-12-11T02:28:49,509 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/5c7dd4c9d6d54567bc0aab9032184968 2024-12-11T02:28:49,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/7bdb12544603403ebf827aa6e3df3006 is 50, key is test_row_0/C:col10/1733884127762/Put/seqid=0 2024-12-11T02:28:49,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742481_1657 (size=12241) 2024-12-11T02:28:49,524 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/c4804f6b13a549739f704ca20e0c034c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/c4804f6b13a549739f704ca20e0c034c 2024-12-11T02:28:49,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742482_1658 (size=12001) 2024-12-11T02:28:49,526 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/7bdb12544603403ebf827aa6e3df3006 2024-12-11T02:28:49,529 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/C of ee5747d737c855bb22265bdc2d0c886b into c4804f6b13a549739f704ca20e0c034c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:49,529 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:49,529 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/C, priority=12, startTime=1733884129409; duration=0sec 2024-12-11T02:28:49,529 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:49,529 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:C 2024-12-11T02:28:49,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/ec640ca24b67408991017bd50a5b5a73 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ec640ca24b67408991017bd50a5b5a73 2024-12-11T02:28:49,533 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ec640ca24b67408991017bd50a5b5a73, entries=150, sequenceid=128, filesize=30.2 K 2024-12-11T02:28:49,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/5c7dd4c9d6d54567bc0aab9032184968 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/5c7dd4c9d6d54567bc0aab9032184968 2024-12-11T02:28:49,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,537 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/5c7dd4c9d6d54567bc0aab9032184968, entries=150, sequenceid=128, filesize=11.7 K 
2024-12-11T02:28:49,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/7bdb12544603403ebf827aa6e3df3006 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7bdb12544603403ebf827aa6e3df3006 2024-12-11T02:28:49,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,541 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7bdb12544603403ebf827aa6e3df3006, entries=150, sequenceid=128, filesize=11.7 K 2024-12-11T02:28:49,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,542 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for ee5747d737c855bb22265bdc2d0c886b in 111ms, sequenceid=128, compaction requested=false 2024-12-11T02:28:49,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 
2024-12-11T02:28:49,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:49,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-11T02:28:49,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-11T02:28:49,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry repeats with only the timestamp and handler changing: RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 40311 each log "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" continuously from 2024-12-11T02:28:49,544 through 2024-12-11T02:28:49,559 ...]
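(Context for the repeated entry above: StoreFileTrackerFactory resolves which StoreFileTracker implementation to use for a store and, with no override configured, falls back to DefaultStoreFileTracker, which is why the same class name appears on every handler. Below is a rough sketch of that kind of configuration-keyed reflective factory; all names and the config key are hypothetical stand-ins, not the actual HBase source.)

import java.lang.reflect.Constructor;
import java.util.Map;

// Hypothetical stand-ins for the real HBase types; illustrative only.
interface StoreFileTracker {}

class DefaultStoreFileTracker implements StoreFileTracker {}

final class TrackerFactorySketch {
  // Hypothetical config key; HBase uses its own property name for this.
  static final String IMPL_KEY = "store.file-tracker.impl";

  static StoreFileTracker create(Map<String, String> conf) throws ReflectiveOperationException {
    // Fall back to the default implementation when nothing is configured,
    // matching the class name repeated in the DEBUG entries above.
    String className = conf.getOrDefault(IMPL_KEY, DefaultStoreFileTracker.class.getName());
    System.out.println("instantiating StoreFileTracker impl " + className);
    Class<?> clazz = Class.forName(className);
    Constructor<?> ctor = clazz.getDeclaredConstructor();
    return (StoreFileTracker) ctor.newInstance();
  }

  public static void main(String[] args) throws ReflectiveOperationException {
    StoreFileTracker tracker = create(Map.of());
    System.out.println(tracker.getClass().getName());
  }
}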
[... the StoreFileTrackerFactory(122) DEBUG entries keep arriving on handlers 0-2 while the flush procedures complete ...]
2024-12-11T02:28:49,560 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167
2024-12-11T02:28:49,560 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7920 sec
2024-12-11T02:28:49,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.8140 sec
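(Context for the INFO entries above: a FlushTableProcedure with FlushRegionProcedure children is what the master runs when a flush of the table is requested. A minimal client-side sketch of one common way such a flush is triggered, assuming a reachable cluster and standard HBase client configuration; the test harness itself drives this through its own utilities.)

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws IOException {
    // Loads hbase-site.xml from the classpath; assumes the cluster from this log is reachable.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table; the log above shows the master
      // finishing such a table-level flush (pid=167) and a per-region child (pid=168).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}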
[... the same StoreFileTrackerFactory(122) DEBUG entry continues to repeat on RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2, port 40311, from 2024-12-11T02:28:49,562 through 2024-12-11T02:28:49,597 ...]
2024-12-11T02:28:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T02:28:49,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T02:28:49,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T02:28:49,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries repeated by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (port 40311) from 2024-12-11T02:28:49,629 through 02:28:49,679 ...]
2024-12-11T02:28:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T02:28:49,802 .. 2024-12-11T02:28:49,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0|1|2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (identical entry logged repeatedly by handlers 0, 1 and 2 throughout this interval, interleaved with the entries that follow)
[... interleaved repeated DEBUG entries elided, 02:28:49,852-02:28:49,867: RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=40311 logging storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-12-11T02:28:49,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-12-11T02:28:49,854 INFO [Thread-2739 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed
2024-12-11T02:28:49,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-11T02:28:49,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees
2024-12-11T02:28:49,858 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-11T02:28:49,858 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-11T02:28:49,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-11T02:28:49,859 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/ee5373a2f3174960a4c14682f29189a3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ee5373a2f3174960a4c14682f29189a3
2024-12-11T02:28:49,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169
2024-12-11T02:28:49,865 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/B of ee5747d737c855bb22265bdc2d0c886b into ee5373a2f3174960a4c14682f29189a3(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-11T02:28:49,865 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b:
2024-12-11T02:28:49,865 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/B, priority=12, startTime=1733884129408; duration=0sec
2024-12-11T02:28:49,865 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T02:28:49,865 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:B
[... repeated DEBUG entries elided, 02:28:49,867-02:28:49,918: RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=40311 logging storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-12-11T02:28:49,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-11T02:28:49,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:49,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T02:28:50,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:50,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:50,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:50,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:50,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:50,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:50,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:50,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,011 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-11T02:28:50,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:50,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:50,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:50,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:50,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c65b835b70864aff9e2d37e1d7b356aa_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884130003/Put/seqid=0 2024-12-11T02:28:50,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742483_1659 (size=22268) 2024-12-11T02:28:50,029 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,032 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c65b835b70864aff9e2d37e1d7b356aa_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c65b835b70864aff9e2d37e1d7b356aa_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:50,033 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/de4b036bc2fe459b83e9d795a9385af5, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:50,034 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/de4b036bc2fe459b83e9d795a9385af5 is 175, key is test_row_0/A:col10/1733884130003/Put/seqid=0 2024-12-11T02:28:50,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884190037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884190037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884190038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884190040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884190041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742484_1660 (size=65673) 2024-12-11T02:28:50,054 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=142, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/de4b036bc2fe459b83e9d795a9385af5 2024-12-11T02:28:50,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/377f6a5041894360978f28d11d912d63 is 50, key is test_row_0/B:col10/1733884130003/Put/seqid=0 2024-12-11T02:28:50,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742485_1661 (size=12151) 2024-12-11T02:28:50,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884190141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884190142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884190142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884190144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884190145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-11T02:28:50,163 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-11T02:28:50,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:50,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:50,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:50,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,316 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-11T02:28:50,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:50,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:50,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:50,317 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884190343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884190345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884190346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884190347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884190348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-11T02:28:50,466 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/377f6a5041894360978f28d11d912d63 2024-12-11T02:28:50,469 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-11T02:28:50,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:50,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:50,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
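The RegionTooBusyException stack traces above all originate in HRegion.checkResources(), which rejects new mutations while the region's memstore is above its blocking limit (reported here as 512.0 K; the limit is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, and this test run uses a deliberately small flush size). Below is a minimal, hypothetical client-side sketch of retrying a put with backoff when that limit is hit; the table name is taken from the log, but the column family, value, retry count and sleep interval are illustrative assumptions, and in practice the HBase client already retries such failures internally.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put); // rejected server-side while the memstore is over its blocking limit
          break;
        } catch (IOException e) {
          // RegionTooBusyException (possibly wrapped in a retries-exhausted exception)
          // means the region is temporarily refusing writes; back off and try again.
          if (++attempts >= 5) {
            throw e; // illustrative cap on retries
          }
          Thread.sleep(200L * attempts); // simple linear backoff
        }
      }
    }
  }
}
```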
2024-12-11T02:28:50,470 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:50,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/6f6b94ee2f6749b2b432b392c9dabc6d is 50, key is test_row_0/C:col10/1733884130003/Put/seqid=0 2024-12-11T02:28:50,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742486_1662 (size=12151) 2024-12-11T02:28:50,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=142 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/6f6b94ee2f6749b2b432b392c9dabc6d 2024-12-11T02:28:50,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/de4b036bc2fe459b83e9d795a9385af5 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/de4b036bc2fe459b83e9d795a9385af5 2024-12-11T02:28:50,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/de4b036bc2fe459b83e9d795a9385af5, entries=350, sequenceid=142, filesize=64.1 K 2024-12-11T02:28:50,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/377f6a5041894360978f28d11d912d63 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/377f6a5041894360978f28d11d912d63 2024-12-11T02:28:50,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/377f6a5041894360978f28d11d912d63, entries=150, sequenceid=142, filesize=11.9 K 2024-12-11T02:28:50,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/6f6b94ee2f6749b2b432b392c9dabc6d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6f6b94ee2f6749b2b432b392c9dabc6d 2024-12-11T02:28:50,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6f6b94ee2f6749b2b432b392c9dabc6d, entries=150, sequenceid=142, filesize=11.9 K 2024-12-11T02:28:50,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for ee5747d737c855bb22265bdc2d0c886b in 494ms, sequenceid=142, compaction requested=true 2024-12-11T02:28:50,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:50,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:50,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:50,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:50,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:50,500 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:50,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:50,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T02:28:50,501 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:50,501 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:50,501 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/A is initiating minor compaction (all files) 2024-12-11T02:28:50,502 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/A in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:50,502 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ae62dfb5ad6f417e8c509deec3523a06, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ec640ca24b67408991017bd50a5b5a73, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/de4b036bc2fe459b83e9d795a9385af5] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=124.8 K 2024-12-11T02:28:50,502 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:50,502 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ae62dfb5ad6f417e8c509deec3523a06, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ec640ca24b67408991017bd50a5b5a73, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/de4b036bc2fe459b83e9d795a9385af5] 2024-12-11T02:28:50,502 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:50,502 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/B is initiating minor compaction (all files) 2024-12-11T02:28:50,502 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/B in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
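The SortedCompactionPolicy / ExploringCompactionPolicy entries above show each store selecting all three of its eligible files for a minor compaction ("3 eligible, 16 blocking"). The knobs behind that selection are ordinary hbase-site.xml settings; the sketch below merely sets the common ones programmatically on a Configuration, and the values shown are the usual defaults rather than anything read from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum and maximum number of store files considered in one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Once a store accumulates this many files, updates are held back until
    // compactions catch up or a wait time expires (the "16 blocking" figure above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```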
2024-12-11T02:28:50,502 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ee5373a2f3174960a4c14682f29189a3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/5c7dd4c9d6d54567bc0aab9032184968, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/377f6a5041894360978f28d11d912d63] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=35.5 K 2024-12-11T02:28:50,502 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae62dfb5ad6f417e8c509deec3523a06, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733884127423 2024-12-11T02:28:50,502 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec640ca24b67408991017bd50a5b5a73, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733884127755 2024-12-11T02:28:50,502 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ee5373a2f3174960a4c14682f29189a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733884127423 2024-12-11T02:28:50,503 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting de4b036bc2fe459b83e9d795a9385af5, keycount=350, bloomtype=ROW, size=64.1 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733884129959 2024-12-11T02:28:50,503 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c7dd4c9d6d54567bc0aab9032184968, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733884127755 2024-12-11T02:28:50,503 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 377f6a5041894360978f28d11d912d63, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733884129995 2024-12-11T02:28:50,509 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:50,511 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#B#compaction#565 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:50,511 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/d254858474ec4078b593030997e290c4 is 50, key is test_row_0/B:col10/1733884130003/Put/seqid=0 2024-12-11T02:28:50,513 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211aa5b31832bff46ff82b3647dfb5a80d5_ee5747d737c855bb22265bdc2d0c886b store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:50,514 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211aa5b31832bff46ff82b3647dfb5a80d5_ee5747d737c855bb22265bdc2d0c886b, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:50,515 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211aa5b31832bff46ff82b3647dfb5a80d5_ee5747d737c855bb22265bdc2d0c886b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:50,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742487_1663 (size=12493) 2024-12-11T02:28:50,525 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/d254858474ec4078b593030997e290c4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/d254858474ec4078b593030997e290c4 2024-12-11T02:28:50,529 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/B of ee5747d737c855bb22265bdc2d0c886b into d254858474ec4078b593030997e290c4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:50,529 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:50,529 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/B, priority=13, startTime=1733884130500; duration=0sec 2024-12-11T02:28:50,529 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:50,529 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:B 2024-12-11T02:28:50,530 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:50,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742488_1664 (size=4469) 2024-12-11T02:28:50,531 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:50,531 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/C is initiating minor compaction (all files) 2024-12-11T02:28:50,531 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/C in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
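The DefaultMobStoreCompactor entries above (and the MOB flush entries that follow) indicate that column family A of this table is MOB-enabled, so flushes and compactions route oversized cells through the separate mobdir area; here the compactor aborts its MOB writer because no cell actually qualifies as a MOB cell. As a purely illustrative example of how such a family is declared, not the exact schema used by TestAcidGuarantees, a MOB-enabled descriptor could be built like this:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)          // keep oversized cells in the MOB area
        .setMobThreshold(100 * 1024)  // illustrative threshold: cells above ~100 KB become MOB cells
        .build();
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(mobFamily)
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(table);
    }
  }
}
```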
2024-12-11T02:28:50,531 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/c4804f6b13a549739f704ca20e0c034c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7bdb12544603403ebf827aa6e3df3006, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6f6b94ee2f6749b2b432b392c9dabc6d] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=35.5 K 2024-12-11T02:28:50,531 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting c4804f6b13a549739f704ca20e0c034c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733884127423 2024-12-11T02:28:50,532 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bdb12544603403ebf827aa6e3df3006, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733884127755 2024-12-11T02:28:50,532 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#A#compaction#564 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:50,532 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f6b94ee2f6749b2b432b392c9dabc6d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733884129995 2024-12-11T02:28:50,533 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a75e217895c14947826a9d20b7693f4e is 175, key is test_row_0/A:col10/1733884130003/Put/seqid=0 2024-12-11T02:28:50,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742489_1665 (size=31447) 2024-12-11T02:28:50,542 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a75e217895c14947826a9d20b7693f4e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a75e217895c14947826a9d20b7693f4e 2024-12-11T02:28:50,545 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#C#compaction#566 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:50,545 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/beed3ebf1c76433bab7da39fdcf8dc0e is 50, key is test_row_0/C:col10/1733884130003/Put/seqid=0 2024-12-11T02:28:50,549 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/A of ee5747d737c855bb22265bdc2d0c886b into a75e217895c14947826a9d20b7693f4e(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:50,549 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:50,549 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/A, priority=13, startTime=1733884130500; duration=0sec 2024-12-11T02:28:50,549 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:50,549 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:A 2024-12-11T02:28:50,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742490_1666 (size=12493) 2024-12-11T02:28:50,624 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-11T02:28:50,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
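The pid=170 procedure keeps failing with "Unable to complete flush ... as already flushing", and the master simply redispatches FlushRegionCallable until the in-flight memstore flush finishes; the ERROR entries are the per-attempt failures of that retry loop rather than a terminal failure. From the client side, such a flush is normally requested through the Admin API and the procedure framework handles the retries, roughly as sketched below (connection setup is assumed; the table name is taken from the log).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run flush procedures for every region of the table;
      // per-region callables are retried while a region reports it is already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```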
2024-12-11T02:28:50,625 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T02:28:50,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:50,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:50,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:50,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:50,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:50,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:50,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110f9f6d618eba401f83981bc6c70193de_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884130039/Put/seqid=0 2024-12-11T02:28:50,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742491_1667 (size=12304) 2024-12-11T02:28:50,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:50,641 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110f9f6d618eba401f83981bc6c70193de_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110f9f6d618eba401f83981bc6c70193de_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:50,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/eaf324664c4e400f887214cff2b515bf, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:50,642 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/eaf324664c4e400f887214cff2b515bf is 175, key is test_row_0/A:col10/1733884130039/Put/seqid=0 2024-12-11T02:28:50,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742492_1668 (size=31105) 2024-12-11T02:28:50,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:50,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:50,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884190653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884190653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884190654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884190655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884190656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884190757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884190757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884190759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884190759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,955 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/beed3ebf1c76433bab7da39fdcf8dc0e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/beed3ebf1c76433bab7da39fdcf8dc0e 2024-12-11T02:28:50,958 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/C of ee5747d737c855bb22265bdc2d0c886b into beed3ebf1c76433bab7da39fdcf8dc0e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:50,958 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:50,958 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/C, priority=13, startTime=1733884130500; duration=0sec 2024-12-11T02:28:50,959 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:50,959 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:C 2024-12-11T02:28:50,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884190960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884190960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-11T02:28:50,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884190962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:50,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:50,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884190962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,047 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/eaf324664c4e400f887214cff2b515bf 2024-12-11T02:28:51,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/ff15c5defea04b6c95490fc630e194fe is 50, key is test_row_0/B:col10/1733884130039/Put/seqid=0 2024-12-11T02:28:51,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742493_1669 (size=12151) 2024-12-11T02:28:51,065 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/ff15c5defea04b6c95490fc630e194fe 2024-12-11T02:28:51,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/503971c17b8846c4ae1e0540420fc86d is 50, key is test_row_0/C:col10/1733884130039/Put/seqid=0 2024-12-11T02:28:51,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742494_1670 (size=12151) 2024-12-11T02:28:51,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884191157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884191263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884191263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884191264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884191267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,477 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/503971c17b8846c4ae1e0540420fc86d 2024-12-11T02:28:51,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/eaf324664c4e400f887214cff2b515bf as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/eaf324664c4e400f887214cff2b515bf 2024-12-11T02:28:51,484 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/eaf324664c4e400f887214cff2b515bf, entries=150, sequenceid=170, filesize=30.4 K 2024-12-11T02:28:51,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/ff15c5defea04b6c95490fc630e194fe as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ff15c5defea04b6c95490fc630e194fe 2024-12-11T02:28:51,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,488 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ff15c5defea04b6c95490fc630e194fe, entries=150, sequenceid=170, filesize=11.9 K 2024-12-11T02:28:51,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/503971c17b8846c4ae1e0540420fc86d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/503971c17b8846c4ae1e0540420fc86d 2024-12-11T02:28:51,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T02:28:51,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,493 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/503971c17b8846c4ae1e0540420fc86d, entries=150, sequenceid=170, filesize=11.9 K 2024-12-11T02:28:51,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,494 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for ee5747d737c855bb22265bdc2d0c886b in 869ms, sequenceid=170, compaction requested=false 2024-12-11T02:28:51,494 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:51,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:51,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-12-11T02:28:51,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-12-11T02:28:51,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,497 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-11T02:28:51,497 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6380 sec 2024-12-11T02:28:51,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.6410 sec 2024-12-11T02:28:51,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T02:28:51,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T02:28:51,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[the same storefiletracker.StoreFileTrackerFactory(122) DEBUG entry, "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeats continuously from RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 40311 for timestamps 2024-12-11T02:28:51,505 through 02:28:51,607]
2024-12-11T02:28:51,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T02:28:51,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG entry "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeated by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=40311) from 2024-12-11T02:28:51,672 through 2024-12-11T02:28:51,754 ...]
2024-12-11T02:28:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
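The entries above show RPC handler threads repeatedly instantiating DefaultStoreFileTracker while a flush is requested on region ee5747d737c855bb22265bdc2d0c886b; the entries that follow show the MemStoreFlusher flushing all three column families. Not part of the captured log: a minimal, illustrative Java sketch of how a client can trigger the same kind of table flush that the FlushTableProcedure entries further down record. The table name TestAcidGuarantees is taken from the log; the connection settings are assumed to come from an hbase-site.xml on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml with the cluster's ZooKeeper quorum is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The master runs this request as a FlushTableProcedure (pid=171 further down
          // in this log); when the region is already flushing, the region server logs
          // "NOT flushing ... as already flushing" and the subprocedure is retried.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
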
2024-12-11T02:28:51,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T02:28:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:51,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:51,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:51,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:51,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:51,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:51,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e733dd8a54b84337a793571337c0749c_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884131773/Put/seqid=0 2024-12-11T02:28:51,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,789 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:51,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742495_1671 (size=12304) 2024-12-11T02:28:51,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884191803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884191804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884191806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884191806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884191907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884191907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884191909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:51,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884191910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:51,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-11T02:28:51,964 INFO [Thread-2739 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-11T02:28:51,965 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:51,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-12-11T02:28:51,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-11T02:28:51,967 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:51,967 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:51,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:52,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-11T02:28:52,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884192110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884192110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884192111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884192112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,119 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-11T02:28:52,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:52,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
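The warnings above show client mutations being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the region is flushing, and the FlushRegionCallable for pid=172 failing because the region is already flushing. Not part of the captured log: a hedged Java sketch of how a writer might retry such a rejected put with a short backoff. The row key test_row_0 and family A come from the log; the qualifier, value, retry count, and backoff are illustrative assumptions, and depending on client retry settings the server-side exception may instead surface wrapped in a client-side retries-exhausted exception.

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      // Retries a single put a few times when the region reports it is too busy
      // (memstore above its blocking limit, 512 KB in this test run).
      static void putWithRetry(Connection conn) throws IOException, InterruptedException {
        Put put = new Put(Bytes.toBytes("test_row_0"))
            // Qualifier and value are placeholders, not taken from the log.
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e; // give up after a handful of attempts
              }
              Thread.sleep(100L * attempt); // back off while the flush drains the memstore
            }
          }
        }
      }
    }
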
2024-12-11T02:28:52,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884192161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,198 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:52,201 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e733dd8a54b84337a793571337c0749c_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e733dd8a54b84337a793571337c0749c_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:52,202 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/70c6729342d04b80a283ece5e1bf73e3, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:52,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/70c6729342d04b80a283ece5e1bf73e3 is 175, key is test_row_0/A:col10/1733884131773/Put/seqid=0 2024-12-11T02:28:52,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742496_1672 (size=31105) 2024-12-11T02:28:52,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-11T02:28:52,272 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-11T02:28:52,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:52,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884192414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884192414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884192414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884192417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,425 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-11T02:28:52,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:52,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-11T02:28:52,577 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-11T02:28:52,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:52,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
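The pattern just above repeats throughout this stretch of the log: the master re-dispatches the FlushRegionCallable for pid=172, the region server reports "NOT flushing ... as already flushing", and the remote procedure fails with "Unable to complete flush" until the in-flight flush completes. The stand-alone sketch below is not HBase internals, only a heavily simplified illustration of that coordinator/worker interplay, with an AtomicBoolean standing in for the "already flushing" guard and arbitrary sleep intervals.

import java.util.concurrent.atomic.AtomicBoolean;

public class AlreadyFlushingRetry {
  // true while an earlier flush of the same region is still running
  static final AtomicBoolean flushing = new AtomicBoolean(true);

  static void flushRegion() throws Exception {
    if (!flushing.compareAndSet(false, true)) {
      // corresponds to "NOT flushing ... as already flushing" followed by "Unable to complete flush"
      throw new Exception("Unable to complete flush: already flushing");
    }
    try {
      // ... write the memstore out to store files ...
    } finally {
      flushing.set(false);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // the earlier flush finishes on its own after a while
    new Thread(() -> {
      try { Thread.sleep(500); } catch (InterruptedException ignored) { }
      flushing.set(false);
    }).start();
    for (int attempt = 1; ; attempt++) {   // the coordinator keeps re-dispatching the flush request
      try {
        flushRegion();
        System.out.println("flush succeeded on attempt " + attempt);
        break;
      } catch (Exception e) {
        Thread.sleep(150);                 // comparable to the master's re-dispatch delay
      }
    }
  }
}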
2024-12-11T02:28:52,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,607 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=183, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/70c6729342d04b80a283ece5e1bf73e3 2024-12-11T02:28:52,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/19d36e37f43e494baca1423d6e6316d2 is 50, key is test_row_0/B:col10/1733884131773/Put/seqid=0 2024-12-11T02:28:52,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742497_1673 (size=12151) 2024-12-11T02:28:52,730 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-11T02:28:52,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:52,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:52,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,882 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-11T02:28:52,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:52,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:52,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:52,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884192918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884192919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884192920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:52,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:52,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884192920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:53,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/19d36e37f43e494baca1423d6e6316d2 2024-12-11T02:28:53,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/e9a0911b82fc47b49a4fbe3575da669e is 50, key is test_row_0/C:col10/1733884131773/Put/seqid=0 2024-12-11T02:28:53,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742498_1674 (size=12151) 2024-12-11T02:28:53,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/e9a0911b82fc47b49a4fbe3575da669e 2024-12-11T02:28:53,034 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:53,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-11T02:28:53,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:53,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:53,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
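While the flush drains, the write path keeps bouncing client mutations: the CallRunner entries above show callIds 71 through 111 all failing with the same RegionTooBusyException. From the client side, the usual response is to back off and retry the Put once the memstore has been flushed. The sketch below shows one way to do that with the standard HBase client API; the row, family and qualifier (test_row_0, A, col10) are taken from the log, while the cell value, retry count and backoff schedule are invented for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryBusyPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);          // fails while the region is over its memstore limit
          break;
        } catch (IOException e) {  // RegionTooBusyException (possibly wrapped) extends IOException
          Thread.sleep(backoffMs); // give the flush time to free memstore space
          backoffMs *= 2;
        }
      }
    }
  }
}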
2024-12-11T02:28:53,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:53,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:53,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:53,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/70c6729342d04b80a283ece5e1bf73e3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c6729342d04b80a283ece5e1bf73e3 2024-12-11T02:28:53,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c6729342d04b80a283ece5e1bf73e3, entries=150, sequenceid=183, filesize=30.4 K 2024-12-11T02:28:53,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/19d36e37f43e494baca1423d6e6316d2 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/19d36e37f43e494baca1423d6e6316d2 2024-12-11T02:28:53,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/19d36e37f43e494baca1423d6e6316d2, entries=150, sequenceid=183, filesize=11.9 K 2024-12-11T02:28:53,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/e9a0911b82fc47b49a4fbe3575da669e as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/e9a0911b82fc47b49a4fbe3575da669e 2024-12-11T02:28:53,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/e9a0911b82fc47b49a4fbe3575da669e, entries=150, sequenceid=183, filesize=11.9 K 2024-12-11T02:28:53,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ee5747d737c855bb22265bdc2d0c886b in 1275ms, sequenceid=183, compaction requested=true 2024-12-11T02:28:53,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:53,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:53,050 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:53,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:53,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:53,050 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:53,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:53,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:53,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:53,051 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:53,051 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/A is initiating minor compaction (all files) 2024-12-11T02:28:53,051 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/A in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:53,051 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a75e217895c14947826a9d20b7693f4e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/eaf324664c4e400f887214cff2b515bf, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c6729342d04b80a283ece5e1bf73e3] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=91.5 K 2024-12-11T02:28:53,051 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:53,051 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a75e217895c14947826a9d20b7693f4e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/eaf324664c4e400f887214cff2b515bf, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c6729342d04b80a283ece5e1bf73e3] 2024-12-11T02:28:53,052 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting a75e217895c14947826a9d20b7693f4e, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733884129995 2024-12-11T02:28:53,052 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:53,052 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting eaf324664c4e400f887214cff2b515bf, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733884130036 2024-12-11T02:28:53,052 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/B is initiating minor compaction (all files) 2024-12-11T02:28:53,052 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/B in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:53,052 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/d254858474ec4078b593030997e290c4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ff15c5defea04b6c95490fc630e194fe, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/19d36e37f43e494baca1423d6e6316d2] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=35.9 K 2024-12-11T02:28:53,052 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70c6729342d04b80a283ece5e1bf73e3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733884130653 2024-12-11T02:28:53,053 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting d254858474ec4078b593030997e290c4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733884129995 2024-12-11T02:28:53,053 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ff15c5defea04b6c95490fc630e194fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733884130036 2024-12-11T02:28:53,054 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 19d36e37f43e494baca1423d6e6316d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733884130653 2024-12-11T02:28:53,058 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:53,060 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#B#compaction#574 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:53,060 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/9c0835e89bc1416f98ad7ef33db2685f is 50, key is test_row_0/B:col10/1733884131773/Put/seqid=0 2024-12-11T02:28:53,062 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121134da2608ec0647de9d9afd95a2e96ad4_ee5747d737c855bb22265bdc2d0c886b store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:53,064 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121134da2608ec0647de9d9afd95a2e96ad4_ee5747d737c855bb22265bdc2d0c886b, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:53,064 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121134da2608ec0647de9d9afd95a2e96ad4_ee5747d737c855bb22265bdc2d0c886b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:53,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742499_1675 (size=12595) 2024-12-11T02:28:53,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742500_1676 (size=4469) 2024-12-11T02:28:53,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-11T02:28:53,070 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/9c0835e89bc1416f98ad7ef33db2685f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/9c0835e89bc1416f98ad7ef33db2685f 2024-12-11T02:28:53,071 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#A#compaction#573 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:53,071 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/3091004dfca84befb556e01f4cebe5f0 is 175, key is test_row_0/A:col10/1733884131773/Put/seqid=0 2024-12-11T02:28:53,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742501_1677 (size=31549) 2024-12-11T02:28:53,076 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/B of ee5747d737c855bb22265bdc2d0c886b into 9c0835e89bc1416f98ad7ef33db2685f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:53,076 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:53,076 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/B, priority=13, startTime=1733884133050; duration=0sec 2024-12-11T02:28:53,076 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:53,076 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:B 2024-12-11T02:28:53,076 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:53,077 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:53,077 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/C is initiating minor compaction (all files) 2024-12-11T02:28:53,078 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/C in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:53,078 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/beed3ebf1c76433bab7da39fdcf8dc0e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/503971c17b8846c4ae1e0540420fc86d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/e9a0911b82fc47b49a4fbe3575da669e] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=35.9 K 2024-12-11T02:28:53,078 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/3091004dfca84befb556e01f4cebe5f0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3091004dfca84befb556e01f4cebe5f0 2024-12-11T02:28:53,079 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting beed3ebf1c76433bab7da39fdcf8dc0e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=142, earliestPutTs=1733884129995 2024-12-11T02:28:53,079 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 503971c17b8846c4ae1e0540420fc86d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733884130036 2024-12-11T02:28:53,079 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting e9a0911b82fc47b49a4fbe3575da669e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733884130653 2024-12-11T02:28:53,082 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/A of ee5747d737c855bb22265bdc2d0c886b into 3091004dfca84befb556e01f4cebe5f0(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:53,082 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:53,082 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/A, priority=13, startTime=1733884133050; duration=0sec 2024-12-11T02:28:53,082 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:53,082 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:A 2024-12-11T02:28:53,085 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#C#compaction#575 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:53,085 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/8e265d0647b74a4e80e3568be94cf120 is 50, key is test_row_0/C:col10/1733884131773/Put/seqid=0 2024-12-11T02:28:53,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742502_1678 (size=12595) 2024-12-11T02:28:53,092 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/8e265d0647b74a4e80e3568be94cf120 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8e265d0647b74a4e80e3568be94cf120 2024-12-11T02:28:53,095 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/C of ee5747d737c855bb22265bdc2d0c886b into 8e265d0647b74a4e80e3568be94cf120(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:53,095 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:53,095 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/C, priority=13, startTime=1733884133050; duration=0sec 2024-12-11T02:28:53,095 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:53,095 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:C 2024-12-11T02:28:53,187 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:53,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-11T02:28:53,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:53,188 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T02:28:53,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:53,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:53,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:53,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:53,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:53,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:53,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121190ad049d42214c15adb45cb7b95df720_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884131801/Put/seqid=0 2024-12-11T02:28:53,199 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742503_1679 (size=12304) 2024-12-11T02:28:53,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:53,202 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121190ad049d42214c15adb45cb7b95df720_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121190ad049d42214c15adb45cb7b95df720_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:53,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/020822e008e746f5a7cdf26067798ece, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:53,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/020822e008e746f5a7cdf26067798ece is 175, key is test_row_0/A:col10/1733884131801/Put/seqid=0 2024-12-11T02:28:53,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742504_1680 (size=31105) 2024-12-11T02:28:53,608 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/020822e008e746f5a7cdf26067798ece 2024-12-11T02:28:53,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/3b30d74f91bc43c1b98778070b0623b9 is 50, key is test_row_0/B:col10/1733884131801/Put/seqid=0 2024-12-11T02:28:53,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742505_1681 (size=12151) 2024-12-11T02:28:53,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:53,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
as already flushing 2024-12-11T02:28:53,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:53,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884193930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:53,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:53,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884193931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:53,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:53,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884193931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:53,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:53,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884193933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,026 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/3b30d74f91bc43c1b98778070b0623b9 2024-12-11T02:28:54,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/0cedad35cc26498985f60d5a4750a33e is 50, key is test_row_0/C:col10/1733884131801/Put/seqid=0 2024-12-11T02:28:54,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884194034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884194034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884194036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742506_1682 (size=12151) 2024-12-11T02:28:54,039 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/0cedad35cc26498985f60d5a4750a33e 2024-12-11T02:28:54,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/020822e008e746f5a7cdf26067798ece as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/020822e008e746f5a7cdf26067798ece 2024-12-11T02:28:54,046 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/020822e008e746f5a7cdf26067798ece, entries=150, sequenceid=210, filesize=30.4 K 2024-12-11T02:28:54,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/3b30d74f91bc43c1b98778070b0623b9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/3b30d74f91bc43c1b98778070b0623b9 2024-12-11T02:28:54,050 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 
{event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/3b30d74f91bc43c1b98778070b0623b9, entries=150, sequenceid=210, filesize=11.9 K 2024-12-11T02:28:54,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/0cedad35cc26498985f60d5a4750a33e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/0cedad35cc26498985f60d5a4750a33e 2024-12-11T02:28:54,053 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/0cedad35cc26498985f60d5a4750a33e, entries=150, sequenceid=210, filesize=11.9 K 2024-12-11T02:28:54,054 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ee5747d737c855bb22265bdc2d0c886b in 866ms, sequenceid=210, compaction requested=false 2024-12-11T02:28:54,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:54,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:54,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-11T02:28:54,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-11T02:28:54,057 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-11T02:28:54,057 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0870 sec 2024-12-11T02:28:54,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 2.0920 sec 2024-12-11T02:28:54,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-11T02:28:54,071 INFO [Thread-2739 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-11T02:28:54,072 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:54,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-11T02:28:54,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T02:28:54,073 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:54,074 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:54,074 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:54,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T02:28:54,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:54,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T02:28:54,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:54,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:54,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:54,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:54,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:54,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:54,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119cd70c4b27594f60af55dab304031221_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884133932/Put/seqid=0 2024-12-11T02:28:54,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742507_1683 (size=12304) 2024-12-11T02:28:54,225 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-11T02:28:54,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:54,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:54,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884194232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884194236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884194237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884194238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884194335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T02:28:54,377 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-11T02:28:54,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:54,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,378 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:54,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,530 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-11T02:28:54,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:54,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884194537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884194540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884194540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884194541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,591 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:54,595 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119cd70c4b27594f60af55dab304031221_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119cd70c4b27594f60af55dab304031221_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:54,595 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/287f950e800447519b20a8e88db38e89, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:54,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/287f950e800447519b20a8e88db38e89 is 175, key is test_row_0/A:col10/1733884133932/Put/seqid=0 2024-12-11T02:28:54,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742508_1684 (size=31105) 2024-12-11T02:28:54,600 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=223, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/287f950e800447519b20a8e88db38e89 2024-12-11T02:28:54,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/29c412ad60b54051b60aa4682bbb1325 is 50, key is test_row_0/B:col10/1733884133932/Put/seqid=0 2024-12-11T02:28:54,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742509_1685 
(size=12151) 2024-12-11T02:28:54,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T02:28:54,682 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-11T02:28:54,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:54,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,835 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-11T02:28:54,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:54,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:54,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:54,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:54,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884194840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,988 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:54,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-11T02:28:54,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:54,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:54,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:54,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:54,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:55,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/29c412ad60b54051b60aa4682bbb1325 2024-12-11T02:28:55,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/9bfe2cf79c1d482b85232fca7865668e is 50, key is test_row_0/C:col10/1733884133932/Put/seqid=0 2024-12-11T02:28:55,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742510_1686 (size=12151) 2024-12-11T02:28:55,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:55,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884195045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:55,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:55,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884195045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:55,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:55,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884195046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:55,140 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:55,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-11T02:28:55,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:55,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:55,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:55,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:55,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:55,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:55,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T02:28:55,293 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:55,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-11T02:28:55,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:55,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:55,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:55,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:55,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:55,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:55,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884195347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:55,419 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/9bfe2cf79c1d482b85232fca7865668e 2024-12-11T02:28:55,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/287f950e800447519b20a8e88db38e89 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/287f950e800447519b20a8e88db38e89 2024-12-11T02:28:55,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/287f950e800447519b20a8e88db38e89, entries=150, sequenceid=223, filesize=30.4 K 2024-12-11T02:28:55,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/29c412ad60b54051b60aa4682bbb1325 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/29c412ad60b54051b60aa4682bbb1325 2024-12-11T02:28:55,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/29c412ad60b54051b60aa4682bbb1325, entries=150, sequenceid=223, filesize=11.9 K 2024-12-11T02:28:55,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/9bfe2cf79c1d482b85232fca7865668e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/9bfe2cf79c1d482b85232fca7865668e 2024-12-11T02:28:55,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/9bfe2cf79c1d482b85232fca7865668e, entries=150, sequenceid=223, filesize=11.9 K 2024-12-11T02:28:55,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ee5747d737c855bb22265bdc2d0c886b in 1256ms, sequenceid=223, compaction requested=true 2024-12-11T02:28:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:55,435 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:55,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:55,435 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:55,436 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:55,436 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:55,436 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/A is initiating minor compaction (all files) 2024-12-11T02:28:55,436 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/B is initiating minor compaction (all files) 2024-12-11T02:28:55,436 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/B in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:55,436 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/A in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:55,436 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/9c0835e89bc1416f98ad7ef33db2685f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/3b30d74f91bc43c1b98778070b0623b9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/29c412ad60b54051b60aa4682bbb1325] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.0 K 2024-12-11T02:28:55,436 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3091004dfca84befb556e01f4cebe5f0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/020822e008e746f5a7cdf26067798ece, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/287f950e800447519b20a8e88db38e89] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=91.6 K 2024-12-11T02:28:55,436 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:55,436 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3091004dfca84befb556e01f4cebe5f0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/020822e008e746f5a7cdf26067798ece, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/287f950e800447519b20a8e88db38e89] 2024-12-11T02:28:55,437 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c0835e89bc1416f98ad7ef33db2685f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733884130653 2024-12-11T02:28:55,437 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3091004dfca84befb556e01f4cebe5f0, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733884130653 2024-12-11T02:28:55,437 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b30d74f91bc43c1b98778070b0623b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733884131801 2024-12-11T02:28:55,437 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 020822e008e746f5a7cdf26067798ece, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733884131801 2024-12-11T02:28:55,437 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 29c412ad60b54051b60aa4682bbb1325, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733884133930 2024-12-11T02:28:55,437 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 287f950e800447519b20a8e88db38e89, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733884133930 2024-12-11T02:28:55,442 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:55,443 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#B#compaction#582 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:55,444 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/172b37802ab54224a4f9b68977de31c6 is 50, key is test_row_0/B:col10/1733884133932/Put/seqid=0 2024-12-11T02:28:55,444 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211f6a69ee31b1c45959e6cc181c871de7c_ee5747d737c855bb22265bdc2d0c886b store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:55,445 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:55,446 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211f6a69ee31b1c45959e6cc181c871de7c_ee5747d737c855bb22265bdc2d0c886b, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:55,446 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f6a69ee31b1c45959e6cc181c871de7c_ee5747d737c855bb22265bdc2d0c886b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:55,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-11T02:28:55,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:55,447 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:28:55,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:55,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:55,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:55,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:55,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:55,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:55,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742512_1688 (size=4469) 2024-12-11T02:28:55,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742511_1687 (size=12697) 2024-12-11T02:28:55,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110233689068c44623b8dc94e3c96c94b2_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884134222/Put/seqid=0 2024-12-11T02:28:55,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742513_1689 (size=12304) 2024-12-11T02:28:55,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:55,461 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110233689068c44623b8dc94e3c96c94b2_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110233689068c44623b8dc94e3c96c94b2_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:55,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/c8cd41b086854dfeb2dfa55e88b9365d, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:55,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/c8cd41b086854dfeb2dfa55e88b9365d is 175, key is test_row_0/A:col10/1733884134222/Put/seqid=0 2024-12-11T02:28:55,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742514_1690 (size=31105) 2024-12-11T02:28:55,852 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#A#compaction#583 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:55,853 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/3c01e45d5bc74ee7abfb9f9aad7cb49e is 175, key is test_row_0/A:col10/1733884133932/Put/seqid=0 2024-12-11T02:28:55,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742515_1691 (size=31651) 2024-12-11T02:28:55,858 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/172b37802ab54224a4f9b68977de31c6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/172b37802ab54224a4f9b68977de31c6 2024-12-11T02:28:55,862 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/B of ee5747d737c855bb22265bdc2d0c886b into 172b37802ab54224a4f9b68977de31c6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:55,862 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:55,862 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/B, priority=13, startTime=1733884135435; duration=0sec 2024-12-11T02:28:55,862 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:55,862 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:B 2024-12-11T02:28:55,862 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:55,863 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:55,863 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/C is initiating minor compaction (all files) 2024-12-11T02:28:55,863 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/C in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:55,863 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8e265d0647b74a4e80e3568be94cf120, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/0cedad35cc26498985f60d5a4750a33e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/9bfe2cf79c1d482b85232fca7865668e] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.0 K 2024-12-11T02:28:55,863 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e265d0647b74a4e80e3568be94cf120, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733884130653 2024-12-11T02:28:55,864 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cedad35cc26498985f60d5a4750a33e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733884131801 2024-12-11T02:28:55,864 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bfe2cf79c1d482b85232fca7865668e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733884133930 2024-12-11T02:28:55,869 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
ee5747d737c855bb22265bdc2d0c886b#C#compaction#585 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:55,869 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=246, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/c8cd41b086854dfeb2dfa55e88b9365d 2024-12-11T02:28:55,869 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/3d74ba2ea4fd4421a0bdd079c20b41fe is 50, key is test_row_0/C:col10/1733884133932/Put/seqid=0 2024-12-11T02:28:55,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742516_1692 (size=12697) 2024-12-11T02:28:55,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/44869d1ba39e488eaedd3e2e24452de0 is 50, key is test_row_0/B:col10/1733884134222/Put/seqid=0 2024-12-11T02:28:55,879 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/3d74ba2ea4fd4421a0bdd079c20b41fe as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3d74ba2ea4fd4421a0bdd079c20b41fe 2024-12-11T02:28:55,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742517_1693 (size=12151) 2024-12-11T02:28:55,883 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/C of ee5747d737c855bb22265bdc2d0c886b into 3d74ba2ea4fd4421a0bdd079c20b41fe(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:55,883 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:55,883 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/C, priority=13, startTime=1733884135435; duration=0sec 2024-12-11T02:28:55,883 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:55,883 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:C 2024-12-11T02:28:55,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:55,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:55,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:55,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884195970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:56,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:56,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884196049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:56,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884196051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:56,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884196056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:56,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:56,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884196074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:56,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T02:28:56,262 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/3c01e45d5bc74ee7abfb9f9aad7cb49e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3c01e45d5bc74ee7abfb9f9aad7cb49e 2024-12-11T02:28:56,266 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/A of ee5747d737c855bb22265bdc2d0c886b into 3c01e45d5bc74ee7abfb9f9aad7cb49e(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:28:56,266 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:56,266 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/A, priority=13, startTime=1733884135435; duration=0sec 2024-12-11T02:28:56,266 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:56,266 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:A 2024-12-11T02:28:56,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:56,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884196276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:56,282 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/44869d1ba39e488eaedd3e2e24452de0 2024-12-11T02:28:56,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/24cb357534d3413e833c0378bcd59c90 is 50, key is test_row_0/C:col10/1733884134222/Put/seqid=0 2024-12-11T02:28:56,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742518_1694 (size=12151) 2024-12-11T02:28:56,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:56,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884196358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:56,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:56,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884196580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:56,691 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/24cb357534d3413e833c0378bcd59c90 2024-12-11T02:28:56,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/c8cd41b086854dfeb2dfa55e88b9365d as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c8cd41b086854dfeb2dfa55e88b9365d 2024-12-11T02:28:56,698 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c8cd41b086854dfeb2dfa55e88b9365d, entries=150, sequenceid=246, filesize=30.4 K 2024-12-11T02:28:56,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/44869d1ba39e488eaedd3e2e24452de0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/44869d1ba39e488eaedd3e2e24452de0 2024-12-11T02:28:56,702 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/44869d1ba39e488eaedd3e2e24452de0, entries=150, sequenceid=246, filesize=11.9 K 2024-12-11T02:28:56,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/24cb357534d3413e833c0378bcd59c90 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/24cb357534d3413e833c0378bcd59c90 2024-12-11T02:28:56,706 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/24cb357534d3413e833c0378bcd59c90, entries=150, sequenceid=246, filesize=11.9 K 2024-12-11T02:28:56,708 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ee5747d737c855bb22265bdc2d0c886b in 1260ms, sequenceid=246, compaction requested=false 2024-12-11T02:28:56,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:56,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:56,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-11T02:28:56,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-11T02:28:56,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-11T02:28:56,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6350 sec 2024-12-11T02:28:56,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 2.6380 sec 2024-12-11T02:28:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:57,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:28:57,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:57,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:57,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:57,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:57,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:57,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:57,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118a2c7d3f12084c568b1122456715fcdf_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884137085/Put/seqid=0 2024-12-11T02:28:57,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742519_1695 (size=12454) 2024-12-11T02:28:57,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:57,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884197138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:57,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:57,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884197240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:57,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:57,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884197445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:57,496 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:57,500 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118a2c7d3f12084c568b1122456715fcdf_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118a2c7d3f12084c568b1122456715fcdf_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:57,500 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a9102870601741e5804701f344261f55, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:57,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a9102870601741e5804701f344261f55 is 175, key is test_row_0/A:col10/1733884137085/Put/seqid=0 2024-12-11T02:28:57,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742520_1696 (size=31255) 2024-12-11T02:28:57,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884197749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:57,905 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=263, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a9102870601741e5804701f344261f55 2024-12-11T02:28:57,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/cd79f54e262849e2ad504678554c69d5 is 50, key is test_row_0/B:col10/1733884137085/Put/seqid=0 2024-12-11T02:28:57,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742521_1697 (size=12301) 2024-12-11T02:28:57,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/cd79f54e262849e2ad504678554c69d5 2024-12-11T02:28:57,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/130dcf39bbeb4b51966598875fc3a006 is 50, key is test_row_0/C:col10/1733884137085/Put/seqid=0 2024-12-11T02:28:57,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742522_1698 (size=12301) 2024-12-11T02:28:58,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:58,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884198059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,062 DEBUG [Thread-2729 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:58,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:58,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884198066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,068 DEBUG [Thread-2731 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:58,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:58,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884198073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,076 DEBUG [Thread-2735 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:28:58,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T02:28:58,177 INFO [Thread-2739 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-11T02:28:58,179 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:28:58,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-12-11T02:28:58,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T02:28:58,180 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:28:58,180 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:28:58,181 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:28:58,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:58,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884198255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T02:28:58,325 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/130dcf39bbeb4b51966598875fc3a006 2024-12-11T02:28:58,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a9102870601741e5804701f344261f55 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a9102870601741e5804701f344261f55 2024-12-11T02:28:58,332 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:58,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a9102870601741e5804701f344261f55, entries=150, sequenceid=263, filesize=30.5 K 2024-12-11T02:28:58,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:58,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:58,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/cd79f54e262849e2ad504678554c69d5 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/cd79f54e262849e2ad504678554c69d5 2024-12-11T02:28:58,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:58,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/cd79f54e262849e2ad504678554c69d5, entries=150, sequenceid=263, filesize=12.0 K 2024-12-11T02:28:58,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/130dcf39bbeb4b51966598875fc3a006 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/130dcf39bbeb4b51966598875fc3a006 2024-12-11T02:28:58,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/130dcf39bbeb4b51966598875fc3a006, entries=150, sequenceid=263, filesize=12.0 K 2024-12-11T02:28:58,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ee5747d737c855bb22265bdc2d0c886b in 1255ms, sequenceid=263, compaction requested=true 2024-12-11T02:28:58,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:58,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:58,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:58,342 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:58,342 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:58,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:58,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:58,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:58,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:58,343 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94011 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:58,343 DEBUG 
[RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:58,343 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/A is initiating minor compaction (all files) 2024-12-11T02:28:58,343 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/B is initiating minor compaction (all files) 2024-12-11T02:28:58,343 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/B in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,343 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/A in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,343 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3c01e45d5bc74ee7abfb9f9aad7cb49e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c8cd41b086854dfeb2dfa55e88b9365d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a9102870601741e5804701f344261f55] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=91.8 K 2024-12-11T02:28:58,343 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/172b37802ab54224a4f9b68977de31c6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/44869d1ba39e488eaedd3e2e24452de0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/cd79f54e262849e2ad504678554c69d5] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.3 K 2024-12-11T02:28:58,343 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,343 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3c01e45d5bc74ee7abfb9f9aad7cb49e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c8cd41b086854dfeb2dfa55e88b9365d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a9102870601741e5804701f344261f55] 2024-12-11T02:28:58,343 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 172b37802ab54224a4f9b68977de31c6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733884133930 2024-12-11T02:28:58,343 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c01e45d5bc74ee7abfb9f9aad7cb49e, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733884133930 2024-12-11T02:28:58,344 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 44869d1ba39e488eaedd3e2e24452de0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733884134222 2024-12-11T02:28:58,344 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8cd41b086854dfeb2dfa55e88b9365d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733884134222 2024-12-11T02:28:58,344 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting cd79f54e262849e2ad504678554c69d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733884135960 2024-12-11T02:28:58,344 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9102870601741e5804701f344261f55, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733884135960 2024-12-11T02:28:58,349 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:58,351 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211bf45c85ef81c4a909235a251fe4d3f68_ee5747d737c855bb22265bdc2d0c886b store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:58,351 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#B#compaction#592 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:58,352 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/a311fdefefe24ce1bf0fe3727bf8c338 is 50, key is test_row_0/B:col10/1733884137085/Put/seqid=0 2024-12-11T02:28:58,352 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211bf45c85ef81c4a909235a251fe4d3f68_ee5747d737c855bb22265bdc2d0c886b, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:58,352 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bf45c85ef81c4a909235a251fe4d3f68_ee5747d737c855bb22265bdc2d0c886b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:58,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742524_1700 (size=4469) 2024-12-11T02:28:58,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742523_1699 (size=12949) 2024-12-11T02:28:58,359 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#A#compaction#591 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:58,360 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/91f775b81a4f4c2c952bda5eb33ef09f is 175, key is test_row_0/A:col10/1733884137085/Put/seqid=0 2024-12-11T02:28:58,364 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/a311fdefefe24ce1bf0fe3727bf8c338 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a311fdefefe24ce1bf0fe3727bf8c338 2024-12-11T02:28:58,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742525_1701 (size=31903) 2024-12-11T02:28:58,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:58,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:28:58,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:58,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:58,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:58,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:58,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:58,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:58,371 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/B of ee5747d737c855bb22265bdc2d0c886b into a311fdefefe24ce1bf0fe3727bf8c338(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:58,371 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:58,371 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/B, priority=13, startTime=1733884138342; duration=0sec 2024-12-11T02:28:58,371 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:58,371 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:B 2024-12-11T02:28:58,371 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:58,371 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/91f775b81a4f4c2c952bda5eb33ef09f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/91f775b81a4f4c2c952bda5eb33ef09f 2024-12-11T02:28:58,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114963bff6c224488e8f6fef6a4cdc6260_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884137137/Put/seqid=0 2024-12-11T02:28:58,374 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:58,374 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/C is initiating minor compaction (all files) 2024-12-11T02:28:58,374 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/C in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:58,374 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3d74ba2ea4fd4421a0bdd079c20b41fe, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/24cb357534d3413e833c0378bcd59c90, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/130dcf39bbeb4b51966598875fc3a006] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.3 K 2024-12-11T02:28:58,375 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d74ba2ea4fd4421a0bdd079c20b41fe, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733884133930 2024-12-11T02:28:58,376 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 24cb357534d3413e833c0378bcd59c90, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733884134222 2024-12-11T02:28:58,376 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 130dcf39bbeb4b51966598875fc3a006, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733884135960 2024-12-11T02:28:58,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742526_1702 (size=12454) 2024-12-11T02:28:58,379 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:58,380 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/A of ee5747d737c855bb22265bdc2d0c886b into 91f775b81a4f4c2c952bda5eb33ef09f(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T02:28:58,380 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:58,380 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/A, priority=13, startTime=1733884138341; duration=0sec 2024-12-11T02:28:58,380 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:58,380 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:A 2024-12-11T02:28:58,382 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114963bff6c224488e8f6fef6a4cdc6260_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114963bff6c224488e8f6fef6a4cdc6260_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:58,384 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/0b379dcf88f14a2cae928a7678951a58, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:58,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/0b379dcf88f14a2cae928a7678951a58 is 175, key is test_row_0/A:col10/1733884137137/Put/seqid=0 2024-12-11T02:28:58,385 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#C#compaction#594 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:58,386 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/6aecd284ba2149f6ad1e7c88c6106df0 is 50, key is test_row_0/C:col10/1733884137085/Put/seqid=0 2024-12-11T02:28:58,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742527_1703 (size=31255) 2024-12-11T02:28:58,397 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=286, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/0b379dcf88f14a2cae928a7678951a58 2024-12-11T02:28:58,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:58,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884198396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742528_1704 (size=12949) 2024-12-11T02:28:58,406 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/6aecd284ba2149f6ad1e7c88c6106df0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6aecd284ba2149f6ad1e7c88c6106df0 2024-12-11T02:28:58,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/2150e4ba65464bcbb02b105591ed2287 is 50, key is test_row_0/B:col10/1733884137137/Put/seqid=0 2024-12-11T02:28:58,418 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/C of ee5747d737c855bb22265bdc2d0c886b into 6aecd284ba2149f6ad1e7c88c6106df0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
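Editor's note: a client whose puts are rejected with RegionTooBusyException, as in the callId 117/119 entries above, can back off and retry once the in-flight flush has drained the memstore. The sketch below is illustrative only: it assumes the exception surfaces to the caller, whereas the stock HBase client also performs its own internal retries; table, family, and value names are taken from the log or invented for the example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);          // may be rejected while the region is over its memstore limit
              return;                  // write accepted
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs); // back off and let the in-flight flush drain the memstore
              backoffMs *= 2;
            }
          }
          throw new IllegalStateException("region still too busy after retries");
        }
      }
    }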
2024-12-11T02:28:58,419 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:58,419 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/C, priority=13, startTime=1733884138342; duration=0sec 2024-12-11T02:28:58,419 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:58,419 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:C 2024-12-11T02:28:58,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742529_1705 (size=12301) 2024-12-11T02:28:58,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T02:28:58,485 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:58,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:58,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:58,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:58,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884198498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,637 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:58,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:58,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
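Editor's note: the pid=176 cycle above is a master-driven flush procedure being re-dispatched while the region is already flushing ("NOT flushing ... as already flushing"), so each remote call fails and is retried until the in-flight flush completes. A flush like this can be requested through the Admin API; a minimal sketch follows, with the table name taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestTableFlush {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush the table's regions; as seen above, the region
          // server rejects the remote call while a flush is already in progress and
          // the master keeps re-dispatching the procedure until it succeeds.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }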
2024-12-11T02:28:58,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:58,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884198701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T02:28:58,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:58,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:58,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,790 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:58,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/2150e4ba65464bcbb02b105591ed2287 2024-12-11T02:28:58,827 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/12d9169a2fab4c409c9467c91ca51e3b is 50, key is test_row_0/C:col10/1733884137137/Put/seqid=0 2024-12-11T02:28:58,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742530_1706 (size=12301) 2024-12-11T02:28:58,943 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:58,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:58,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:58,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:58,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:58,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:58,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884199005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,096 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:59,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:59,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:59,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/12d9169a2fab4c409c9467c91ca51e3b 2024-12-11T02:28:59,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/0b379dcf88f14a2cae928a7678951a58 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/0b379dcf88f14a2cae928a7678951a58 2024-12-11T02:28:59,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/0b379dcf88f14a2cae928a7678951a58, entries=150, sequenceid=286, filesize=30.5 K 2024-12-11T02:28:59,241 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-11T02:28:59,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/2150e4ba65464bcbb02b105591ed2287 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/2150e4ba65464bcbb02b105591ed2287 2024-12-11T02:28:59,245 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/2150e4ba65464bcbb02b105591ed2287, entries=150, sequenceid=286, filesize=12.0 K 2024-12-11T02:28:59,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/12d9169a2fab4c409c9467c91ca51e3b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/12d9169a2fab4c409c9467c91ca51e3b 2024-12-11T02:28:59,248 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/12d9169a2fab4c409c9467c91ca51e3b, entries=150, sequenceid=286, filesize=12.0 K 2024-12-11T02:28:59,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:59,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:59,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ee5747d737c855bb22265bdc2d0c886b in 883ms, sequenceid=286, compaction requested=false 2024-12-11T02:28:59,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:59,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:59,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:28:59,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:59,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:59,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:59,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:59,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:59,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:59,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121195c7fe085b6f41cea4b36fb3715f4811_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884138393/Put/seqid=0 2024-12-11T02:28:59,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742531_1707 (size=12454) 2024-12-11T02:28:59,273 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:28:59,276 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121195c7fe085b6f41cea4b36fb3715f4811_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121195c7fe085b6f41cea4b36fb3715f4811_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:59,277 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/196ec90b5fec4627815c9d9de6c4dd59, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:59,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/196ec90b5fec4627815c9d9de6c4dd59 is 175, key is test_row_0/A:col10/1733884138393/Put/seqid=0 2024-12-11T02:28:59,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742532_1708 (size=31255) 2024-12-11T02:28:59,281 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=303, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/196ec90b5fec4627815c9d9de6c4dd59 2024-12-11T02:28:59,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T02:28:59,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/af41ffe885b14dddae880c35d03aab55 is 50, key is test_row_0/B:col10/1733884138393/Put/seqid=0 2024-12-11T02:28:59,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742533_1709 (size=12301) 2024-12-11T02:28:59,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:59,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884199317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,401 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:59,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:59,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:59,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:59,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884199419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884199511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,554 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:59,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:59,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,555 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:59,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:59,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884199622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,691 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/af41ffe885b14dddae880c35d03aab55 2024-12-11T02:28:59,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/1ba2a8d59f144c1d9487f6e76cf6ca0b is 50, key is test_row_0/C:col10/1733884138393/Put/seqid=0 2024-12-11T02:28:59,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742534_1710 (size=12301) 2024-12-11T02:28:59,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/1ba2a8d59f144c1d9487f6e76cf6ca0b 2024-12-11T02:28:59,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/196ec90b5fec4627815c9d9de6c4dd59 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/196ec90b5fec4627815c9d9de6c4dd59 2024-12-11T02:28:59,707 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:59,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:59,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:28:59,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T02:28:59,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/196ec90b5fec4627815c9d9de6c4dd59, entries=150, sequenceid=303, filesize=30.5 K 2024-12-11T02:28:59,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/af41ffe885b14dddae880c35d03aab55 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/af41ffe885b14dddae880c35d03aab55 2024-12-11T02:28:59,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/af41ffe885b14dddae880c35d03aab55, entries=150, sequenceid=303, filesize=12.0 K 2024-12-11T02:28:59,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/1ba2a8d59f144c1d9487f6e76cf6ca0b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1ba2a8d59f144c1d9487f6e76cf6ca0b 2024-12-11T02:28:59,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1ba2a8d59f144c1d9487f6e76cf6ca0b, entries=150, sequenceid=303, filesize=12.0 K 2024-12-11T02:28:59,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ee5747d737c855bb22265bdc2d0c886b in 462ms, sequenceid=303, compaction requested=true 2024-12-11T02:28:59,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:59,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:28:59,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:59,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:28:59,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:59,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:28:59,725 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting 
compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:59,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:59,725 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:59,726 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:59,726 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:59,726 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/A is initiating minor compaction (all files) 2024-12-11T02:28:59,726 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/B is initiating minor compaction (all files) 2024-12-11T02:28:59,726 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/B in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,726 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/A in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:28:59,726 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a311fdefefe24ce1bf0fe3727bf8c338, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/2150e4ba65464bcbb02b105591ed2287, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/af41ffe885b14dddae880c35d03aab55] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.7 K 2024-12-11T02:28:59,726 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/91f775b81a4f4c2c952bda5eb33ef09f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/0b379dcf88f14a2cae928a7678951a58, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/196ec90b5fec4627815c9d9de6c4dd59] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=92.2 K 2024-12-11T02:28:59,726 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,726 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/91f775b81a4f4c2c952bda5eb33ef09f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/0b379dcf88f14a2cae928a7678951a58, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/196ec90b5fec4627815c9d9de6c4dd59] 2024-12-11T02:28:59,727 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting a311fdefefe24ce1bf0fe3727bf8c338, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733884135960 2024-12-11T02:28:59,727 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91f775b81a4f4c2c952bda5eb33ef09f, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733884135960 2024-12-11T02:28:59,727 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 2150e4ba65464bcbb02b105591ed2287, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733884137123 2024-12-11T02:28:59,727 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b379dcf88f14a2cae928a7678951a58, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733884137123 2024-12-11T02:28:59,727 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting af41ffe885b14dddae880c35d03aab55, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733884138384 2024-12-11T02:28:59,728 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 196ec90b5fec4627815c9d9de6c4dd59, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733884138384 2024-12-11T02:28:59,735 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#B#compaction#600 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:59,736 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/7d225700cd7c4ab1b861e0de53133f8e is 50, key is test_row_0/B:col10/1733884138393/Put/seqid=0 2024-12-11T02:28:59,745 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:59,753 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211ddd1565b40ad4f458ee356bd60a9d971_ee5747d737c855bb22265bdc2d0c886b store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:59,754 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211ddd1565b40ad4f458ee356bd60a9d971_ee5747d737c855bb22265bdc2d0c886b, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:59,754 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ddd1565b40ad4f458ee356bd60a9d971_ee5747d737c855bb22265bdc2d0c886b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:28:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742535_1711 (size=13051) 2024-12-11T02:28:59,760 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/7d225700cd7c4ab1b861e0de53133f8e as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/7d225700cd7c4ab1b861e0de53133f8e 2024-12-11T02:28:59,763 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/B of ee5747d737c855bb22265bdc2d0c886b into 7d225700cd7c4ab1b861e0de53133f8e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
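The PressureAwareThroughputController lines report a total limit of 50.00 MB/second shared across active compactions. The sketch below shows how such a bound is typically expressed; the key names are assumptions taken from the pressure-aware controller and should be checked against the HBase version in use before relying on them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed keys for the pressure-aware compaction throughput controller: the
            // effective limit floats between the lower and higher bounds (bytes/second)
            // depending on flush pressure. Server-side settings, shown for reference only.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        }
    }
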
2024-12-11T02:28:59,763 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:59,763 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/B, priority=13, startTime=1733884139725; duration=0sec 2024-12-11T02:28:59,763 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:28:59,763 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:B 2024-12-11T02:28:59,764 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:28:59,765 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:28:59,765 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/C is initiating minor compaction (all files) 2024-12-11T02:28:59,765 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/C in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,765 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6aecd284ba2149f6ad1e7c88c6106df0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/12d9169a2fab4c409c9467c91ca51e3b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1ba2a8d59f144c1d9487f6e76cf6ca0b] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.7 K 2024-12-11T02:28:59,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742536_1712 (size=4469) 2024-12-11T02:28:59,765 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 6aecd284ba2149f6ad1e7c88c6106df0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1733884135960 2024-12-11T02:28:59,766 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#A#compaction#601 average throughput is 1.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:59,766 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 12d9169a2fab4c409c9467c91ca51e3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733884137123 2024-12-11T02:28:59,767 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ba2a8d59f144c1d9487f6e76cf6ca0b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733884138384 2024-12-11T02:28:59,767 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/86f9aefd83f343dbb956333acabe9ff8 is 175, key is test_row_0/A:col10/1733884138393/Put/seqid=0 2024-12-11T02:28:59,774 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#C#compaction#602 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:28:59,774 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/09541e5fb85d415bb8d0d242ab4be95a is 50, key is test_row_0/C:col10/1733884138393/Put/seqid=0 2024-12-11T02:28:59,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742537_1713 (size=32005) 2024-12-11T02:28:59,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742538_1714 (size=13051) 2024-12-11T02:28:59,788 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/86f9aefd83f343dbb956333acabe9ff8 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/86f9aefd83f343dbb956333acabe9ff8 2024-12-11T02:28:59,793 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/A of ee5747d737c855bb22265bdc2d0c886b into 86f9aefd83f343dbb956333acabe9ff8(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
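The A and B compactions above were queued by MemStoreFlusher.0 rather than an operator, but the same work can be requested and observed through the client Admin API. A small illustrative sketch follows (table and family names taken from the log; error handling omitted, and it assumes a reachable cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionAdminSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Request a compaction of the A family, analogous to the system-requested one above.
                admin.compact(table, Bytes.toBytes("A"));
                // Poll the aggregate compaction state for the table.
                CompactionState state = admin.getCompactionState(table);
                System.out.println("Compaction state: " + state);
            }
        }
    }
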
2024-12-11T02:28:59,793 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:28:59,793 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/A, priority=13, startTime=1733884139725; duration=0sec 2024-12-11T02:28:59,793 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:28:59,793 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:A 2024-12-11T02:28:59,860 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:28:59,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T02:28:59,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:28:59,861 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T02:28:59,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:28:59,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:59,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:28:59,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:59,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:28:59,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:28:59,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211aba3abbd874f497dbaaf5a34fbd8446f_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884139308/Put/seqid=0 2024-12-11T02:28:59,877 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742539_1715 (size=12454) 2024-12-11T02:28:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:28:59,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:28:59,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:28:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884199953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:00,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:00,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884200055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:00,183 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/09541e5fb85d415bb8d0d242ab4be95a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/09541e5fb85d415bb8d0d242ab4be95a 2024-12-11T02:29:00,187 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/C of ee5747d737c855bb22265bdc2d0c886b into 09541e5fb85d415bb8d0d242ab4be95a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:29:00,187 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:00,187 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/C, priority=13, startTime=1733884139725; duration=0sec 2024-12-11T02:29:00,187 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:29:00,187 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:C 2024-12-11T02:29:00,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:00,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884200257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:00,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:29:00,282 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211aba3abbd874f497dbaaf5a34fbd8446f_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211aba3abbd874f497dbaaf5a34fbd8446f_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:00,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/d1c5adcfe3a44975a3c566cd08111b4c, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:00,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/d1c5adcfe3a44975a3c566cd08111b4c is 175, key is test_row_0/A:col10/1733884139308/Put/seqid=0 2024-12-11T02:29:00,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T02:29:00,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742540_1716 (size=31255) 2024-12-11T02:29:00,287 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=327, memsize=42.5 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/d1c5adcfe3a44975a3c566cd08111b4c 2024-12-11T02:29:00,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/59dc08ab57784536ac27d8d42853c56d is 50, key is test_row_0/B:col10/1733884139308/Put/seqid=0 2024-12-11T02:29:00,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742541_1717 (size=12301) 2024-12-11T02:29:00,299 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/59dc08ab57784536ac27d8d42853c56d 2024-12-11T02:29:00,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/b63a8d93f96d41f8bc34b66471e9dec7 is 50, key is test_row_0/C:col10/1733884139308/Put/seqid=0 2024-12-11T02:29:00,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742542_1718 (size=12301) 2024-12-11T02:29:00,312 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/b63a8d93f96d41f8bc34b66471e9dec7 2024-12-11T02:29:00,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/d1c5adcfe3a44975a3c566cd08111b4c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/d1c5adcfe3a44975a3c566cd08111b4c 2024-12-11T02:29:00,320 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/d1c5adcfe3a44975a3c566cd08111b4c, entries=150, sequenceid=327, filesize=30.5 K 2024-12-11T02:29:00,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/59dc08ab57784536ac27d8d42853c56d as 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/59dc08ab57784536ac27d8d42853c56d 2024-12-11T02:29:00,324 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/59dc08ab57784536ac27d8d42853c56d, entries=150, sequenceid=327, filesize=12.0 K 2024-12-11T02:29:00,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/b63a8d93f96d41f8bc34b66471e9dec7 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/b63a8d93f96d41f8bc34b66471e9dec7 2024-12-11T02:29:00,328 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/b63a8d93f96d41f8bc34b66471e9dec7, entries=150, sequenceid=327, filesize=12.0 K 2024-12-11T02:29:00,329 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for ee5747d737c855bb22265bdc2d0c886b in 468ms, sequenceid=327, compaction requested=false 2024-12-11T02:29:00,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:00,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
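The RegionTooBusyException entries above ("Over memstore limit=512.0 K") are thrown while the region's memstore sits above its blocking threshold; the standard client retries them internally until the in-flight flush (completed just above in 468ms at sequenceid=327) frees space. Below is a hedged sketch of the knobs involved and of a write that would be subject to this back-pressure. The 512 K ceiling in this log is a test-tuned value, not a default, and the two keys shown are server-side settings named here only for reference.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstoreBackPressureSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Blocking limit = flush.size * block.multiplier; writes receive
            // RegionTooBusyException (as in the log above) once a region's memstore
            // exceeds that product. Values chosen to mirror the 512 K seen here.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // The client's retrying caller absorbs RegionTooBusyException up to its
                // configured retry/deadline budget before surfacing an error.
                table.put(put);
            }
        }
    }
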
2024-12-11T02:29:00,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-11T02:29:00,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-11T02:29:00,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-11T02:29:00,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1490 sec 2024-12-11T02:29:00,333 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 2.1530 sec 2024-12-11T02:29:00,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:00,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T02:29:00,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:29:00,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:00,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:29:00,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:00,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:29:00,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:00,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121191989b15ad01438285944eb323e4a42b_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884139944/Put/seqid=0 2024-12-11T02:29:00,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742543_1719 (size=12454) 2024-12-11T02:29:00,532 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:29:00,535 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121191989b15ad01438285944eb323e4a42b_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121191989b15ad01438285944eb323e4a42b_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:00,536 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/c4f16967f1b74a689afa8aa78d99c55f, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:00,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/c4f16967f1b74a689afa8aa78d99c55f is 175, key is test_row_0/A:col10/1733884139944/Put/seqid=0 2024-12-11T02:29:00,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742544_1720 (size=31255) 2024-12-11T02:29:00,540 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=343, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/c4f16967f1b74a689afa8aa78d99c55f 2024-12-11T02:29:00,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/007236ee05df4e8bb5fab733e18caab3 is 50, key is test_row_0/B:col10/1733884139944/Put/seqid=0 2024-12-11T02:29:00,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742545_1721 (size=12301) 2024-12-11T02:29:00,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884200568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:00,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884200569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:00,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:00,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884200672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:00,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:00,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884200672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:00,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:00,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884200875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:00,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:00,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884200875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:00,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/007236ee05df4e8bb5fab733e18caab3 2024-12-11T02:29:00,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/f03886c2a3444c82aee9d1e3dd1d04e9 is 50, key is test_row_0/C:col10/1733884139944/Put/seqid=0 2024-12-11T02:29:00,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742546_1722 (size=12301) 2024-12-11T02:29:00,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/f03886c2a3444c82aee9d1e3dd1d04e9 2024-12-11T02:29:00,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/c4f16967f1b74a689afa8aa78d99c55f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c4f16967f1b74a689afa8aa78d99c55f 2024-12-11T02:29:00,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c4f16967f1b74a689afa8aa78d99c55f, entries=150, sequenceid=343, filesize=30.5 K 2024-12-11T02:29:00,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/007236ee05df4e8bb5fab733e18caab3 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/007236ee05df4e8bb5fab733e18caab3 2024-12-11T02:29:00,971 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/007236ee05df4e8bb5fab733e18caab3, entries=150, sequenceid=343, filesize=12.0 K 2024-12-11T02:29:00,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/f03886c2a3444c82aee9d1e3dd1d04e9 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/f03886c2a3444c82aee9d1e3dd1d04e9 2024-12-11T02:29:00,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/f03886c2a3444c82aee9d1e3dd1d04e9, entries=150, sequenceid=343, filesize=12.0 K 2024-12-11T02:29:00,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for ee5747d737c855bb22265bdc2d0c886b in 454ms, sequenceid=343, compaction requested=true 2024-12-11T02:29:00,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:00,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:29:00,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:29:00,975 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:29:00,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:29:00,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:29:00,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:29:00,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:29:00,975 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:29:00,976 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:29:00,977 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): 
ee5747d737c855bb22265bdc2d0c886b/A is initiating minor compaction (all files) 2024-12-11T02:29:00,977 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/A in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:00,977 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:29:00,977 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/86f9aefd83f343dbb956333acabe9ff8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/d1c5adcfe3a44975a3c566cd08111b4c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c4f16967f1b74a689afa8aa78d99c55f] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=92.3 K 2024-12-11T02:29:00,977 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/B is initiating minor compaction (all files) 2024-12-11T02:29:00,977 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:00,977 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/B in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:00,977 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/86f9aefd83f343dbb956333acabe9ff8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/d1c5adcfe3a44975a3c566cd08111b4c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c4f16967f1b74a689afa8aa78d99c55f] 2024-12-11T02:29:00,977 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/7d225700cd7c4ab1b861e0de53133f8e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/59dc08ab57784536ac27d8d42853c56d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/007236ee05df4e8bb5fab733e18caab3] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.8 K 2024-12-11T02:29:00,977 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86f9aefd83f343dbb956333acabe9ff8, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733884138384 2024-12-11T02:29:00,977 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d225700cd7c4ab1b861e0de53133f8e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733884138384 2024-12-11T02:29:00,978 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1c5adcfe3a44975a3c566cd08111b4c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733884139303 2024-12-11T02:29:00,978 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 59dc08ab57784536ac27d8d42853c56d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733884139303 2024-12-11T02:29:00,978 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4f16967f1b74a689afa8aa78d99c55f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733884139944 2024-12-11T02:29:00,978 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 007236ee05df4e8bb5fab733e18caab3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733884139944 2024-12-11T02:29:00,983 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:00,986 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e2024121191b6ad2c1a914e3f896c25d33314cc05_ee5747d737c855bb22265bdc2d0c886b store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:00,986 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#B#compaction#610 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:29:00,987 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/ddb16b12ebf449a0b823d6c419cbf363 is 50, key is test_row_0/B:col10/1733884139944/Put/seqid=0 2024-12-11T02:29:00,987 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121191b6ad2c1a914e3f896c25d33314cc05_ee5747d737c855bb22265bdc2d0c886b, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:00,987 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121191b6ad2c1a914e3f896c25d33314cc05_ee5747d737c855bb22265bdc2d0c886b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:00,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742547_1723 (size=13153) 2024-12-11T02:29:00,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742548_1724 (size=4469) 2024-12-11T02:29:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:01,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T02:29:01,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:29:01,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:01,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:29:01,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:01,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:29:01,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:01,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d84df168a38144bf91cb68a15c430a1a_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884141181/Put/seqid=0 2024-12-11T02:29:01,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742549_1725 (size=17534) 2024-12-11T02:29:01,193 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:29:01,196 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d84df168a38144bf91cb68a15c430a1a_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211d84df168a38144bf91cb68a15c430a1a_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:01,197 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/32d8c7099e9b476294d3eff99e7086c4, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:01,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/32d8c7099e9b476294d3eff99e7086c4 is 175, key is test_row_0/A:col10/1733884141181/Put/seqid=0 2024-12-11T02:29:01,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742550_1726 (size=48639) 2024-12-11T02:29:01,201 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=366, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/32d8c7099e9b476294d3eff99e7086c4 2024-12-11T02:29:01,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884201199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:01,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884201203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:01,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/0bde79b7801546bab73f4e369add925b is 50, key is test_row_0/B:col10/1733884141181/Put/seqid=0 2024-12-11T02:29:01,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742551_1727 (size=12301) 2024-12-11T02:29:01,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/0bde79b7801546bab73f4e369add925b 2024-12-11T02:29:01,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/008f39407e8d46258f8c555a8e76d09c is 50, key is test_row_0/C:col10/1733884141181/Put/seqid=0 2024-12-11T02:29:01,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742552_1728 (size=12301) 2024-12-11T02:29:01,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884201304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:01,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884201307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:01,392 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#A#compaction#609 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:29:01,393 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4c61f9f5d25e4a0d9439f5cb61d2310a is 175, key is test_row_0/A:col10/1733884139944/Put/seqid=0 2024-12-11T02:29:01,395 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/ddb16b12ebf449a0b823d6c419cbf363 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ddb16b12ebf449a0b823d6c419cbf363 2024-12-11T02:29:01,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742553_1729 (size=32107) 2024-12-11T02:29:01,400 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/B of ee5747d737c855bb22265bdc2d0c886b into ddb16b12ebf449a0b823d6c419cbf363(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:29:01,400 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:01,400 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/B, priority=13, startTime=1733884140975; duration=0sec 2024-12-11T02:29:01,400 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:29:01,400 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:B 2024-12-11T02:29:01,400 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:29:01,400 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4c61f9f5d25e4a0d9439f5cb61d2310a as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4c61f9f5d25e4a0d9439f5cb61d2310a 2024-12-11T02:29:01,401 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:29:01,401 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/C is initiating minor compaction (all files) 2024-12-11T02:29:01,401 INFO 
[RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/C in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:01,401 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/09541e5fb85d415bb8d0d242ab4be95a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/b63a8d93f96d41f8bc34b66471e9dec7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/f03886c2a3444c82aee9d1e3dd1d04e9] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.8 K 2024-12-11T02:29:01,402 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 09541e5fb85d415bb8d0d242ab4be95a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733884138384 2024-12-11T02:29:01,402 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting b63a8d93f96d41f8bc34b66471e9dec7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733884139303 2024-12-11T02:29:01,403 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting f03886c2a3444c82aee9d1e3dd1d04e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733884139944 2024-12-11T02:29:01,404 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/A of ee5747d737c855bb22265bdc2d0c886b into 4c61f9f5d25e4a0d9439f5cb61d2310a(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:29:01,404 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:01,404 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/A, priority=13, startTime=1733884140975; duration=0sec 2024-12-11T02:29:01,408 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:29:01,408 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:A 2024-12-11T02:29:01,409 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#C#compaction#614 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:29:01,410 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/1673ee338eb8404c936e71c66ee108ab is 50, key is test_row_0/C:col10/1733884139944/Put/seqid=0 2024-12-11T02:29:01,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742554_1730 (size=13153) 2024-12-11T02:29:01,417 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/1673ee338eb8404c936e71c66ee108ab as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1673ee338eb8404c936e71c66ee108ab 2024-12-11T02:29:01,421 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/C of ee5747d737c855bb22265bdc2d0c886b into 1673ee338eb8404c936e71c66ee108ab(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:29:01,421 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:01,421 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/C, priority=13, startTime=1733884140975; duration=0sec 2024-12-11T02:29:01,422 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:29:01,422 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:C 2024-12-11T02:29:01,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884201507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:01,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884201510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:01,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/008f39407e8d46258f8c555a8e76d09c 2024-12-11T02:29:01,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/32d8c7099e9b476294d3eff99e7086c4 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/32d8c7099e9b476294d3eff99e7086c4 2024-12-11T02:29:01,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/32d8c7099e9b476294d3eff99e7086c4, entries=250, sequenceid=366, filesize=47.5 K 2024-12-11T02:29:01,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/0bde79b7801546bab73f4e369add925b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/0bde79b7801546bab73f4e369add925b 2024-12-11T02:29:01,632 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/0bde79b7801546bab73f4e369add925b, entries=150, sequenceid=366, filesize=12.0 K 2024-12-11T02:29:01,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/008f39407e8d46258f8c555a8e76d09c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/008f39407e8d46258f8c555a8e76d09c 2024-12-11T02:29:01,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/008f39407e8d46258f8c555a8e76d09c, entries=150, sequenceid=366, filesize=12.0 K 2024-12-11T02:29:01,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ee5747d737c855bb22265bdc2d0c886b in 454ms, sequenceid=366, compaction requested=false 2024-12-11T02:29:01,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:01,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:01,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T02:29:01,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:29:01,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:01,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:29:01,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:01,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:29:01,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:01,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119fbaa58a97834891986e22c126547ba4_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884141202/Put/seqid=0 2024-12-11T02:29:01,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742555_1731 (size=12454) 2024-12-11T02:29:01,825 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:29:01,828 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119fbaa58a97834891986e22c126547ba4_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119fbaa58a97834891986e22c126547ba4_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:01,829 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4df3dff012f94abdb3b2bf7f0e25f0f0, store: [table=TestAcidGuarantees family=A 
region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:01,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4df3dff012f94abdb3b2bf7f0e25f0f0 is 175, key is test_row_0/A:col10/1733884141202/Put/seqid=0 2024-12-11T02:29:01,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742556_1732 (size=31255) 2024-12-11T02:29:01,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884201848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:01,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884201849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:01,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884201951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:01,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:01,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884201952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:02,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49730 deadline: 1733884202072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,073 DEBUG [Thread-2731 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:29:02,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:02,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49682 deadline: 1733884202078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,079 DEBUG [Thread-2735 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:29:02,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:02,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49714 deadline: 1733884202079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,082 DEBUG [Thread-2729 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., hostname=5f57a24c5131,40311,1733883964600, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T02:29:02,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:02,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884202155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:02,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884202155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,233 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=383, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4df3dff012f94abdb3b2bf7f0e25f0f0 2024-12-11T02:29:02,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/13903ff3521642f6a65cdb54e1100eb6 is 50, key is test_row_0/B:col10/1733884141202/Put/seqid=0 2024-12-11T02:29:02,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742557_1733 (size=12301) 2024-12-11T02:29:02,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/13903ff3521642f6a65cdb54e1100eb6 2024-12-11T02:29:02,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/7914ba5121fc4fcaa93a7d0defe83053 is 50, key is test_row_0/C:col10/1733884141202/Put/seqid=0 2024-12-11T02:29:02,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742558_1734 (size=12301) 2024-12-11T02:29:02,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T02:29:02,284 INFO [Thread-2739 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-11T02:29:02,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T02:29:02,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-12-11T02:29:02,287 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-11T02:29:02,287 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T02:29:02,287 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T02:29:02,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T02:29:02,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-11T02:29:02,439 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-11T02:29:02,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:02,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:29:02,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:02,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
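The repeated RegionTooBusyException entries above are raised by HRegion.checkResources when the region's memstore exceeds its blocking limit (512.0 K in this run) faster than flushes can drain it, and the client's RpcRetryingCallerImpl keeps retrying (tries=7, retries=16). The following is a minimal, illustrative client-side sketch of the write path that produces these entries; the table name, row key, and column family come from the log, while the class name, cell value, and retry setting are assumptions for illustration only and are not part of the test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressurePutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // matches retries=16 seen in the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put); // retried internally; RegionTooBusyException is retryable
      } catch (IOException e) {
        // Once retries are exhausted, the RegionTooBusyException thrown by
        // HRegion.checkResources surfaces here; back off and retry later.
      }
    }
  }
}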
2024-12-11T02:29:02,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:29:02,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:29:02,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:02,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49750 deadline: 1733884202460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T02:29:02,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40311 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49704 deadline: 1733884202460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,532 DEBUG [Thread-2742 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x503a7d2e to 127.0.0.1:63149 2024-12-11T02:29:02,532 DEBUG [Thread-2742 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:02,533 DEBUG [Thread-2748 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66e06176 to 127.0.0.1:63149 2024-12-11T02:29:02,533 DEBUG [Thread-2748 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:02,533 DEBUG [Thread-2740 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c1d3a95 to 127.0.0.1:63149 2024-12-11T02:29:02,533 DEBUG [Thread-2740 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:02,534 DEBUG [Thread-2744 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x404bb685 to 127.0.0.1:63149 2024-12-11T02:29:02,534 DEBUG [Thread-2744 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:02,535 DEBUG [Thread-2746 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42aacb30 to 127.0.0.1:63149 2024-12-11T02:29:02,535 DEBUG [Thread-2746 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:02,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-11T02:29:02,592 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-11T02:29:02,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:02,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. as already flushing 2024-12-11T02:29:02,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
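The FlushTableProcedure and FlushRegionCallable traffic above (procIds 175, 177, 178) is driven by a client-requested table flush; the region server rejects pid=178 with "Unable to complete flush" because the region is already flushing, and the master re-dispatches the callable. Below is a short sketch of how such a flush is requested through the Admin API; only the table name is taken from the log, and the surrounding class and connection handling are illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master (the pid=177 entries above) and
      // waits for it; if the region is already flushing, the per-region flush
      // callable fails and is retried, as the log shows.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}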
2024-12-11T02:29:02,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:29:02,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:29:02,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T02:29:02,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/7914ba5121fc4fcaa93a7d0defe83053 2024-12-11T02:29:02,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/4df3dff012f94abdb3b2bf7f0e25f0f0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4df3dff012f94abdb3b2bf7f0e25f0f0 2024-12-11T02:29:02,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4df3dff012f94abdb3b2bf7f0e25f0f0, entries=150, sequenceid=383, filesize=30.5 K 2024-12-11T02:29:02,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/13903ff3521642f6a65cdb54e1100eb6 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/13903ff3521642f6a65cdb54e1100eb6 2024-12-11T02:29:02,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/13903ff3521642f6a65cdb54e1100eb6, entries=150, 
sequenceid=383, filesize=12.0 K 2024-12-11T02:29:02,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/7914ba5121fc4fcaa93a7d0defe83053 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7914ba5121fc4fcaa93a7d0defe83053 2024-12-11T02:29:02,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7914ba5121fc4fcaa93a7d0defe83053, entries=150, sequenceid=383, filesize=12.0 K 2024-12-11T02:29:02,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for ee5747d737c855bb22265bdc2d0c886b in 852ms, sequenceid=383, compaction requested=true 2024-12-11T02:29:02,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:02,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T02:29:02,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:29:02,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T02:29:02,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:29:02,667 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:29:02,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ee5747d737c855bb22265bdc2d0c886b:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T02:29:02,667 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:29:02,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:29:02,667 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112001 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:29:02,667 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:29:02,667 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] 
regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/B is initiating minor compaction (all files) 2024-12-11T02:29:02,667 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/A is initiating minor compaction (all files) 2024-12-11T02:29:02,667 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/B in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:02,667 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/A in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:02,667 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ddb16b12ebf449a0b823d6c419cbf363, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/0bde79b7801546bab73f4e369add925b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/13903ff3521642f6a65cdb54e1100eb6] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.9 K 2024-12-11T02:29:02,667 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4c61f9f5d25e4a0d9439f5cb61d2310a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/32d8c7099e9b476294d3eff99e7086c4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4df3dff012f94abdb3b2bf7f0e25f0f0] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=109.4 K 2024-12-11T02:29:02,668 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:02,668 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
files: [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4c61f9f5d25e4a0d9439f5cb61d2310a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/32d8c7099e9b476294d3eff99e7086c4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4df3dff012f94abdb3b2bf7f0e25f0f0] 2024-12-11T02:29:02,668 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting ddb16b12ebf449a0b823d6c419cbf363, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733884139944 2024-12-11T02:29:02,668 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c61f9f5d25e4a0d9439f5cb61d2310a, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733884139944 2024-12-11T02:29:02,668 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bde79b7801546bab73f4e369add925b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733884140540 2024-12-11T02:29:02,668 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32d8c7099e9b476294d3eff99e7086c4, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733884140540 2024-12-11T02:29:02,668 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 13903ff3521642f6a65cdb54e1100eb6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733884141194 2024-12-11T02:29:02,668 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4df3dff012f94abdb3b2bf7f0e25f0f0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733884141194 2024-12-11T02:29:02,673 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:02,673 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#B#compaction#618 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:29:02,673 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/0e5f420cc0204efbae33add838496a9f is 50, key is test_row_0/B:col10/1733884141202/Put/seqid=0 2024-12-11T02:29:02,674 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412118a730b4cd3c94fd3b617664df6b1e806_ee5747d737c855bb22265bdc2d0c886b store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:02,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742559_1735 (size=13255) 2024-12-11T02:29:02,678 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412118a730b4cd3c94fd3b617664df6b1e806_ee5747d737c855bb22265bdc2d0c886b, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:02,678 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118a730b4cd3c94fd3b617664df6b1e806_ee5747d737c855bb22265bdc2d0c886b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:02,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742560_1736 (size=4469) 2024-12-11T02:29:02,744 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:29:02,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40311 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-11T02:29:02,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
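The compaction entries above show the ExploringCompactionPolicy selecting all 3 eligible store files per family (with 16 as the blocking store-file count) and the PressureAwareThroughputController capping throughput at 50 MB/s. The sketch below names the configuration keys that correspond to those numbers; the values are simply the ones visible in this run, not tuning advice, and defaults may differ by HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSettingsExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum eligible files before a minor compaction is selected (3 files above).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Store-file count at which further flushes/writes are blocked ("16 blocking" in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Lower throughput bound used by the pressure-aware controller (50 MB/s in this run).
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
  }
}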
2024-12-11T02:29:02,745 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T02:29:02,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:29:02,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:02,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:29:02,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:02,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:29:02,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:02,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211827cca7f53064487be203b4678ea4ce9_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884141847/Put/seqid=0 2024-12-11T02:29:02,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742561_1737 (size=12454) 2024-12-11T02:29:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-11T02:29:02,933 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T02:29:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40311 {}] regionserver.HRegion(8581): Flush requested on ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:02,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
as already flushing 2024-12-11T02:29:02,969 DEBUG [Thread-2737 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e8cd1ae to 127.0.0.1:63149 2024-12-11T02:29:02,969 DEBUG [Thread-2733 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11c440f7 to 127.0.0.1:63149 2024-12-11T02:29:02,969 DEBUG [Thread-2733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:02,969 DEBUG [Thread-2737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:03,080 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/0e5f420cc0204efbae33add838496a9f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/0e5f420cc0204efbae33add838496a9f 2024-12-11T02:29:03,083 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/B of ee5747d737c855bb22265bdc2d0c886b into 0e5f420cc0204efbae33add838496a9f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:29:03,083 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:03,083 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/B, priority=13, startTime=1733884142667; duration=0sec 2024-12-11T02:29:03,083 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T02:29:03,083 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:B 2024-12-11T02:29:03,083 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T02:29:03,083 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#A#compaction#619 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:29:03,083 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/dc40e8582322426bb02339b069c6f095 is 175, key is test_row_0/A:col10/1733884141202/Put/seqid=0 2024-12-11T02:29:03,084 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T02:29:03,084 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1540): ee5747d737c855bb22265bdc2d0c886b/C is initiating minor compaction (all files) 2024-12-11T02:29:03,084 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ee5747d737c855bb22265bdc2d0c886b/C in TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:03,084 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1673ee338eb8404c936e71c66ee108ab, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/008f39407e8d46258f8c555a8e76d09c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7914ba5121fc4fcaa93a7d0defe83053] into tmpdir=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp, totalSize=36.9 K 2024-12-11T02:29:03,084 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 1673ee338eb8404c936e71c66ee108ab, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733884139944 2024-12-11T02:29:03,084 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 008f39407e8d46258f8c555a8e76d09c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733884140540 2024-12-11T02:29:03,085 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] compactions.Compactor(224): Compacting 7914ba5121fc4fcaa93a7d0defe83053, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733884141194 2024-12-11T02:29:03,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742562_1738 (size=32209) 2024-12-11T02:29:03,089 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/dc40e8582322426bb02339b069c6f095 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/dc40e8582322426bb02339b069c6f095 2024-12-11T02:29:03,091 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): ee5747d737c855bb22265bdc2d0c886b#C#compaction#621 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T02:29:03,091 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/2fc5f5fb0e9a459d8f3f1e110e7a7e6c is 50, key is test_row_0/C:col10/1733884141202/Put/seqid=0 2024-12-11T02:29:03,093 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/A of ee5747d737c855bb22265bdc2d0c886b into dc40e8582322426bb02339b069c6f095(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:29:03,093 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:03,093 INFO [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/A, priority=13, startTime=1733884142666; duration=0sec 2024-12-11T02:29:03,093 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:29:03,093 DEBUG [RS:0;5f57a24c5131:40311-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:A 2024-12-11T02:29:03,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742563_1739 (size=13255) 2024-12-11T02:29:03,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:29:03,156 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211827cca7f53064487be203b4678ea4ce9_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211827cca7f53064487be203b4678ea4ce9_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:03,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/1f09a8859f6e4b3ba1674bfaa01c1a30, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:03,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/1f09a8859f6e4b3ba1674bfaa01c1a30 is 175, key is test_row_0/A:col10/1733884141847/Put/seqid=0 2024-12-11T02:29:03,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742564_1740 (size=31255) 2024-12-11T02:29:03,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-11T02:29:03,498 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/2fc5f5fb0e9a459d8f3f1e110e7a7e6c as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/2fc5f5fb0e9a459d8f3f1e110e7a7e6c 2024-12-11T02:29:03,501 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ee5747d737c855bb22265bdc2d0c886b/C of ee5747d737c855bb22265bdc2d0c886b into 2fc5f5fb0e9a459d8f3f1e110e7a7e6c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T02:29:03,501 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:03,502 INFO [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b., storeName=ee5747d737c855bb22265bdc2d0c886b/C, priority=13, startTime=1733884142667; duration=0sec 2024-12-11T02:29:03,502 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T02:29:03,502 DEBUG [RS:0;5f57a24c5131:40311-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ee5747d737c855bb22265bdc2d0c886b:C 2024-12-11T02:29:03,561 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=406, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/1f09a8859f6e4b3ba1674bfaa01c1a30 2024-12-11T02:29:03,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/fd70c94511454abaa3f88afb7d264cd0 is 50, key is test_row_0/B:col10/1733884141847/Put/seqid=0 2024-12-11T02:29:03,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742565_1741 (size=12301) 2024-12-11T02:29:03,970 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/fd70c94511454abaa3f88afb7d264cd0 2024-12-11T02:29:03,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/c560b5a111c247228a05ec74ede77280 is 50, key is test_row_0/C:col10/1733884141847/Put/seqid=0 2024-12-11T02:29:03,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742566_1742 (size=12301) 2024-12-11T02:29:04,379 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/c560b5a111c247228a05ec74ede77280 2024-12-11T02:29:04,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/1f09a8859f6e4b3ba1674bfaa01c1a30 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/1f09a8859f6e4b3ba1674bfaa01c1a30 2024-12-11T02:29:04,384 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/1f09a8859f6e4b3ba1674bfaa01c1a30, entries=150, sequenceid=406, filesize=30.5 K 2024-12-11T02:29:04,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/fd70c94511454abaa3f88afb7d264cd0 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/fd70c94511454abaa3f88afb7d264cd0 2024-12-11T02:29:04,387 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/fd70c94511454abaa3f88afb7d264cd0, entries=150, sequenceid=406, filesize=12.0 K 2024-12-11T02:29:04,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/c560b5a111c247228a05ec74ede77280 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/c560b5a111c247228a05ec74ede77280 2024-12-11T02:29:04,390 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/c560b5a111c247228a05ec74ede77280, entries=150, sequenceid=406, filesize=12.0 K 2024-12-11T02:29:04,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-11T02:29:04,391 INFO [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=13.42 KB/13740 for ee5747d737c855bb22265bdc2d0c886b in 1646ms, sequenceid=406, compaction requested=false 2024-12-11T02:29:04,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:04,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:04,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f57a24c5131:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-11T02:29:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-12-11T02:29:04,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-11T02:29:04,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1050 sec 2024-12-11T02:29:04,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 2.1070 sec 2024-12-11T02:29:06,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-11T02:29:06,391 INFO [Thread-2739 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-12-11T02:29:12,089 DEBUG [Thread-2731 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d7fe93b to 127.0.0.1:63149 2024-12-11T02:29:12,089 DEBUG [Thread-2731 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:12,146 DEBUG [Thread-2735 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58460ef3 to 127.0.0.1:63149 2024-12-11T02:29:12,146 DEBUG [Thread-2735 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:12,181 DEBUG [Thread-2729 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x58971172 to 127.0.0.1:63149 2024-12-11T02:29:12,181 DEBUG [Thread-2729 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 136 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 95 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6716 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6539 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6442 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6740 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6532 2024-12-11T02:29:12,181 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T02:29:12,181 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T02:29:12,181 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:63149 2024-12-11T02:29:12,181 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:12,182 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T02:29:12,182 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T02:29:12,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T02:29:12,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T02:29:12,185 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884152184"}]},"ts":"1733884152184"} 2024-12-11T02:29:12,185 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T02:29:12,187 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T02:29:12,187 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T02:29:12,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, UNASSIGN}] 2024-12-11T02:29:12,189 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, UNASSIGN 2024-12-11T02:29:12,189 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=ee5747d737c855bb22265bdc2d0c886b, regionState=CLOSING, regionLocation=5f57a24c5131,40311,1733883964600 2024-12-11T02:29:12,190 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T02:29:12,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; CloseRegionProcedure ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600}] 2024-12-11T02:29:12,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T02:29:12,341 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f57a24c5131,40311,1733883964600 2024-12-11T02:29:12,342 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(124): Close ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1681): Closing ee5747d737c855bb22265bdc2d0c886b, disabling compactions & flushes 2024-12-11T02:29:12,342 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. after waiting 0 ms 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 
2024-12-11T02:29:12,342 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(2837): Flushing ee5747d737c855bb22265bdc2d0c886b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=A 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=B 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ee5747d737c855bb22265bdc2d0c886b, store=C 2024-12-11T02:29:12,342 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T02:29:12,347 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211965a694c089545308c69d34d2ca1e310_ee5747d737c855bb22265bdc2d0c886b is 50, key is test_row_0/A:col10/1733884152088/Put/seqid=0 2024-12-11T02:29:12,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742567_1743 (size=12454) 2024-12-11T02:29:12,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T02:29:12,750 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T02:29:12,753 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211965a694c089545308c69d34d2ca1e310_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211965a694c089545308c69d34d2ca1e310_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:12,754 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a973743ff3bf419d874c0bb9801cda65, store: [table=TestAcidGuarantees family=A region=ee5747d737c855bb22265bdc2d0c886b] 2024-12-11T02:29:12,755 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a973743ff3bf419d874c0bb9801cda65 is 175, key is test_row_0/A:col10/1733884152088/Put/seqid=0 2024-12-11T02:29:12,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742568_1744 (size=31255) 2024-12-11T02:29:12,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T02:29:13,158 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=417, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a973743ff3bf419d874c0bb9801cda65 2024-12-11T02:29:13,163 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/db4bfe0d1b5148f9b0a31cc2cc6ace94 is 50, key is test_row_0/B:col10/1733884152088/Put/seqid=0 2024-12-11T02:29:13,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742569_1745 (size=12301) 2024-12-11T02:29:13,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T02:29:13,567 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/db4bfe0d1b5148f9b0a31cc2cc6ace94 2024-12-11T02:29:13,572 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/93ccb3b9784a4639a3a27c8501837714 is 50, key is test_row_0/C:col10/1733884152088/Put/seqid=0 2024-12-11T02:29:13,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742570_1746 (size=12301) 2024-12-11T02:29:13,976 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=417 (bloomFilter=true), 
to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/93ccb3b9784a4639a3a27c8501837714 2024-12-11T02:29:13,979 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/A/a973743ff3bf419d874c0bb9801cda65 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a973743ff3bf419d874c0bb9801cda65 2024-12-11T02:29:13,981 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a973743ff3bf419d874c0bb9801cda65, entries=150, sequenceid=417, filesize=30.5 K 2024-12-11T02:29:13,982 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/B/db4bfe0d1b5148f9b0a31cc2cc6ace94 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/db4bfe0d1b5148f9b0a31cc2cc6ace94 2024-12-11T02:29:13,984 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/db4bfe0d1b5148f9b0a31cc2cc6ace94, entries=150, sequenceid=417, filesize=12.0 K 2024-12-11T02:29:13,984 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/.tmp/C/93ccb3b9784a4639a3a27c8501837714 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/93ccb3b9784a4639a3a27c8501837714 2024-12-11T02:29:13,986 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/93ccb3b9784a4639a3a27c8501837714, entries=150, sequenceid=417, filesize=12.0 K 2024-12-11T02:29:13,987 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for ee5747d737c855bb22265bdc2d0c886b in 1645ms, sequenceid=417, compaction requested=true 2024-12-11T02:29:13,988 DEBUG [StoreCloser-TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/f25b269b51c349f4b1c70f24090f01e8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c23f65b7254c7abbc50cbd1a50f600, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/58a77e271d6b434ab2fb8b158bcfa878, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4e00f821097d4f2784a21b5c701a2066, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/e5cc64256f704734b5c819dcba9bdd01, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4ee20f82cd6b4844a2ce06adc76d0f71, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ae62dfb5ad6f417e8c509deec3523a06, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/26d7bd1418ce4acaa5edbb6534f0e6de, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ec640ca24b67408991017bd50a5b5a73, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/de4b036bc2fe459b83e9d795a9385af5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a75e217895c14947826a9d20b7693f4e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/eaf324664c4e400f887214cff2b515bf, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3091004dfca84befb556e01f4cebe5f0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c6729342d04b80a283ece5e1bf73e3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/020822e008e746f5a7cdf26067798ece, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3c01e45d5bc74ee7abfb9f9aad7cb49e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/287f950e800447519b20a8e88db38e89, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c8cd41b086854dfeb2dfa55e88b9365d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/91f775b81a4f4c2c952bda5eb33ef09f, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a9102870601741e5804701f344261f55, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/0b379dcf88f14a2cae928a7678951a58, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/86f9aefd83f343dbb956333acabe9ff8, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/196ec90b5fec4627815c9d9de6c4dd59, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/d1c5adcfe3a44975a3c566cd08111b4c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4c61f9f5d25e4a0d9439f5cb61d2310a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c4f16967f1b74a689afa8aa78d99c55f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/32d8c7099e9b476294d3eff99e7086c4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4df3dff012f94abdb3b2bf7f0e25f0f0] to archive 2024-12-11T02:29:13,988 DEBUG [StoreCloser-TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-11T02:29:13,990 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/f25b269b51c349f4b1c70f24090f01e8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/f25b269b51c349f4b1c70f24090f01e8 2024-12-11T02:29:13,990 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c23f65b7254c7abbc50cbd1a50f600 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c23f65b7254c7abbc50cbd1a50f600 2024-12-11T02:29:13,991 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4e00f821097d4f2784a21b5c701a2066 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4e00f821097d4f2784a21b5c701a2066 2024-12-11T02:29:13,991 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/58a77e271d6b434ab2fb8b158bcfa878 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/58a77e271d6b434ab2fb8b158bcfa878 2024-12-11T02:29:13,991 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/e5cc64256f704734b5c819dcba9bdd01 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/e5cc64256f704734b5c819dcba9bdd01 2024-12-11T02:29:13,991 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4ee20f82cd6b4844a2ce06adc76d0f71 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4ee20f82cd6b4844a2ce06adc76d0f71 2024-12-11T02:29:13,991 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ae62dfb5ad6f417e8c509deec3523a06 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ae62dfb5ad6f417e8c509deec3523a06 2024-12-11T02:29:13,991 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/26d7bd1418ce4acaa5edbb6534f0e6de to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/26d7bd1418ce4acaa5edbb6534f0e6de 2024-12-11T02:29:13,992 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/eaf324664c4e400f887214cff2b515bf to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/eaf324664c4e400f887214cff2b515bf 2024-12-11T02:29:13,992 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ec640ca24b67408991017bd50a5b5a73 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/ec640ca24b67408991017bd50a5b5a73 2024-12-11T02:29:13,992 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/de4b036bc2fe459b83e9d795a9385af5 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/de4b036bc2fe459b83e9d795a9385af5 2024-12-11T02:29:13,992 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3091004dfca84befb556e01f4cebe5f0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3091004dfca84befb556e01f4cebe5f0 2024-12-11T02:29:13,993 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a75e217895c14947826a9d20b7693f4e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a75e217895c14947826a9d20b7693f4e 2024-12-11T02:29:13,993 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/020822e008e746f5a7cdf26067798ece to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/020822e008e746f5a7cdf26067798ece 2024-12-11T02:29:13,993 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c6729342d04b80a283ece5e1bf73e3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/70c6729342d04b80a283ece5e1bf73e3 2024-12-11T02:29:13,993 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3c01e45d5bc74ee7abfb9f9aad7cb49e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/3c01e45d5bc74ee7abfb9f9aad7cb49e 2024-12-11T02:29:13,994 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c8cd41b086854dfeb2dfa55e88b9365d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c8cd41b086854dfeb2dfa55e88b9365d 2024-12-11T02:29:13,994 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/287f950e800447519b20a8e88db38e89 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/287f950e800447519b20a8e88db38e89 2024-12-11T02:29:13,994 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/91f775b81a4f4c2c952bda5eb33ef09f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/91f775b81a4f4c2c952bda5eb33ef09f 2024-12-11T02:29:13,994 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a9102870601741e5804701f344261f55 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a9102870601741e5804701f344261f55 2024-12-11T02:29:13,995 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/0b379dcf88f14a2cae928a7678951a58 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/0b379dcf88f14a2cae928a7678951a58 2024-12-11T02:29:13,995 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/196ec90b5fec4627815c9d9de6c4dd59 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/196ec90b5fec4627815c9d9de6c4dd59 2024-12-11T02:29:13,995 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/86f9aefd83f343dbb956333acabe9ff8 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/86f9aefd83f343dbb956333acabe9ff8 2024-12-11T02:29:13,995 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/d1c5adcfe3a44975a3c566cd08111b4c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/d1c5adcfe3a44975a3c566cd08111b4c 2024-12-11T02:29:13,995 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c4f16967f1b74a689afa8aa78d99c55f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/c4f16967f1b74a689afa8aa78d99c55f 2024-12-11T02:29:13,995 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4c61f9f5d25e4a0d9439f5cb61d2310a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4c61f9f5d25e4a0d9439f5cb61d2310a 2024-12-11T02:29:13,995 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/32d8c7099e9b476294d3eff99e7086c4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/32d8c7099e9b476294d3eff99e7086c4 2024-12-11T02:29:13,995 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4df3dff012f94abdb3b2bf7f0e25f0f0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/4df3dff012f94abdb3b2bf7f0e25f0f0 2024-12-11T02:29:13,997 DEBUG [StoreCloser-TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/c53965a06dc14172a526f70e675f93cc, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/90dd7cc43c2545d3ab48bcaa3c0bb5b2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/dffdb6374cff4c6cb55eb29f9edb3a39, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/4a48e9afb45c41cca15868ece8212ff3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/e1495e1233644834bc6e8682875231d4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a5bc5a212d6348888244b000743a1359, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ee5373a2f3174960a4c14682f29189a3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/951ae604bfcf4ef7b6ce4648e5a5e37a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/5c7dd4c9d6d54567bc0aab9032184968, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/d254858474ec4078b593030997e290c4, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/377f6a5041894360978f28d11d912d63, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ff15c5defea04b6c95490fc630e194fe, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/9c0835e89bc1416f98ad7ef33db2685f, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/19d36e37f43e494baca1423d6e6316d2, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/3b30d74f91bc43c1b98778070b0623b9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/172b37802ab54224a4f9b68977de31c6, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/29c412ad60b54051b60aa4682bbb1325, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/44869d1ba39e488eaedd3e2e24452de0, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a311fdefefe24ce1bf0fe3727bf8c338, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/cd79f54e262849e2ad504678554c69d5, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/2150e4ba65464bcbb02b105591ed2287, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/7d225700cd7c4ab1b861e0de53133f8e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/af41ffe885b14dddae880c35d03aab55, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/59dc08ab57784536ac27d8d42853c56d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ddb16b12ebf449a0b823d6c419cbf363, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/007236ee05df4e8bb5fab733e18caab3, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/0bde79b7801546bab73f4e369add925b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/13903ff3521642f6a65cdb54e1100eb6] to archive 2024-12-11T02:29:13,997 DEBUG [StoreCloser-TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-11T02:29:13,999 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/c53965a06dc14172a526f70e675f93cc to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/c53965a06dc14172a526f70e675f93cc 2024-12-11T02:29:13,999 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/e1495e1233644834bc6e8682875231d4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/e1495e1233644834bc6e8682875231d4 2024-12-11T02:29:13,999 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/90dd7cc43c2545d3ab48bcaa3c0bb5b2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/90dd7cc43c2545d3ab48bcaa3c0bb5b2 2024-12-11T02:29:13,999 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/dffdb6374cff4c6cb55eb29f9edb3a39 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/dffdb6374cff4c6cb55eb29f9edb3a39 2024-12-11T02:29:13,999 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/4a48e9afb45c41cca15868ece8212ff3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/4a48e9afb45c41cca15868ece8212ff3 2024-12-11T02:29:13,999 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a5bc5a212d6348888244b000743a1359 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a5bc5a212d6348888244b000743a1359 2024-12-11T02:29:13,999 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/951ae604bfcf4ef7b6ce4648e5a5e37a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/951ae604bfcf4ef7b6ce4648e5a5e37a 2024-12-11T02:29:13,999 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ee5373a2f3174960a4c14682f29189a3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ee5373a2f3174960a4c14682f29189a3 2024-12-11T02:29:14,000 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/5c7dd4c9d6d54567bc0aab9032184968 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/5c7dd4c9d6d54567bc0aab9032184968 2024-12-11T02:29:14,000 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/d254858474ec4078b593030997e290c4 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/d254858474ec4078b593030997e290c4 2024-12-11T02:29:14,001 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ff15c5defea04b6c95490fc630e194fe to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ff15c5defea04b6c95490fc630e194fe 2024-12-11T02:29:14,001 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/9c0835e89bc1416f98ad7ef33db2685f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/9c0835e89bc1416f98ad7ef33db2685f 2024-12-11T02:29:14,001 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/19d36e37f43e494baca1423d6e6316d2 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/19d36e37f43e494baca1423d6e6316d2 2024-12-11T02:29:14,001 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/377f6a5041894360978f28d11d912d63 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/377f6a5041894360978f28d11d912d63 2024-12-11T02:29:14,001 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/3b30d74f91bc43c1b98778070b0623b9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/3b30d74f91bc43c1b98778070b0623b9 2024-12-11T02:29:14,001 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/172b37802ab54224a4f9b68977de31c6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/172b37802ab54224a4f9b68977de31c6 2024-12-11T02:29:14,002 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/29c412ad60b54051b60aa4682bbb1325 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/29c412ad60b54051b60aa4682bbb1325 2024-12-11T02:29:14,002 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/44869d1ba39e488eaedd3e2e24452de0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/44869d1ba39e488eaedd3e2e24452de0 2024-12-11T02:29:14,002 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a311fdefefe24ce1bf0fe3727bf8c338 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/a311fdefefe24ce1bf0fe3727bf8c338 2024-12-11T02:29:14,002 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/2150e4ba65464bcbb02b105591ed2287 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/2150e4ba65464bcbb02b105591ed2287 2024-12-11T02:29:14,002 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/cd79f54e262849e2ad504678554c69d5 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/cd79f54e262849e2ad504678554c69d5 2024-12-11T02:29:14,002 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/af41ffe885b14dddae880c35d03aab55 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/af41ffe885b14dddae880c35d03aab55 2024-12-11T02:29:14,002 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/7d225700cd7c4ab1b861e0de53133f8e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/7d225700cd7c4ab1b861e0de53133f8e 2024-12-11T02:29:14,003 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/59dc08ab57784536ac27d8d42853c56d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/59dc08ab57784536ac27d8d42853c56d 2024-12-11T02:29:14,003 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ddb16b12ebf449a0b823d6c419cbf363 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/ddb16b12ebf449a0b823d6c419cbf363 2024-12-11T02:29:14,003 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/007236ee05df4e8bb5fab733e18caab3 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/007236ee05df4e8bb5fab733e18caab3 2024-12-11T02:29:14,003 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/0bde79b7801546bab73f4e369add925b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/0bde79b7801546bab73f4e369add925b 2024-12-11T02:29:14,003 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/13903ff3521642f6a65cdb54e1100eb6 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/13903ff3521642f6a65cdb54e1100eb6 2024-12-11T02:29:14,004 DEBUG [StoreCloser-TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8931b62d6f324c72bb56a5c0b05df3af, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3e76601c00d044ba96705095ee589c9a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d1dc45d8bc48451fb4c930ef5cd7034a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/66d0d4f9514344f3a9fa29580b8bbead, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d0dc989ec73445c0a180e587aaad5537, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/232473155fb648f093c2bca235bbb290, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/c4804f6b13a549739f704ca20e0c034c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/86cf1151002b4f0a8672214131b8bc62, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7bdb12544603403ebf827aa6e3df3006, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/beed3ebf1c76433bab7da39fdcf8dc0e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6f6b94ee2f6749b2b432b392c9dabc6d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/503971c17b8846c4ae1e0540420fc86d, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8e265d0647b74a4e80e3568be94cf120, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/e9a0911b82fc47b49a4fbe3575da669e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/0cedad35cc26498985f60d5a4750a33e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3d74ba2ea4fd4421a0bdd079c20b41fe, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/9bfe2cf79c1d482b85232fca7865668e, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/24cb357534d3413e833c0378bcd59c90, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6aecd284ba2149f6ad1e7c88c6106df0, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/130dcf39bbeb4b51966598875fc3a006, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/12d9169a2fab4c409c9467c91ca51e3b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/09541e5fb85d415bb8d0d242ab4be95a, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1ba2a8d59f144c1d9487f6e76cf6ca0b, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/b63a8d93f96d41f8bc34b66471e9dec7, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1673ee338eb8404c936e71c66ee108ab, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/f03886c2a3444c82aee9d1e3dd1d04e9, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/008f39407e8d46258f8c555a8e76d09c, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7914ba5121fc4fcaa93a7d0defe83053] to archive 2024-12-11T02:29:14,005 DEBUG [StoreCloser-TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
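[Editor's illustrative sketch, not part of the log] The HFileArchiver entries above all follow the same pattern: a compacted store file under .../data/default/TestAcidGuarantees/<region>/<family>/ is moved to the mirrored location under .../archive/data/default/TestAcidGuarantees/<region>/<family>/. The following minimal Java sketch reproduces that path mapping and a rename-based move against the filesystem named in the log; the class name, the explicit rename call, and the choice of one sample file are assumptions for demonstration only and are not the HFileArchiver implementation itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  public static void main(String[] args) throws Exception {
    // Filesystem URI taken from the log entries above.
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:37113");
    FileSystem fs = FileSystem.get(conf);

    Path root = new Path("/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6");
    // One store file of region ee5747d737c855bb22265bdc2d0c886b, family B (sample from the log).
    Path storeFile = new Path(root,
        "data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/c53965a06dc14172a526f70e675f93cc");
    // The archive keeps the same table/region/family layout under archive/.
    Path archived = new Path(root,
        "archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/c53965a06dc14172a526f70e675f93cc");

    fs.mkdirs(archived.getParent());                 // ensure the archive family directory exists
    boolean moved = fs.rename(storeFile, archived);  // move the file, as the DEBUG lines report
    System.out.println("archived=" + moved);
  }
}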
2024-12-11T02:29:14,008 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8931b62d6f324c72bb56a5c0b05df3af to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8931b62d6f324c72bb56a5c0b05df3af 2024-12-11T02:29:14,008 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3e76601c00d044ba96705095ee589c9a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3e76601c00d044ba96705095ee589c9a 2024-12-11T02:29:14,008 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/c4804f6b13a549739f704ca20e0c034c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/c4804f6b13a549739f704ca20e0c034c 2024-12-11T02:29:14,008 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d0dc989ec73445c0a180e587aaad5537 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d0dc989ec73445c0a180e587aaad5537 2024-12-11T02:29:14,008 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/232473155fb648f093c2bca235bbb290 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/232473155fb648f093c2bca235bbb290 2024-12-11T02:29:14,008 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/66d0d4f9514344f3a9fa29580b8bbead to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/66d0d4f9514344f3a9fa29580b8bbead 2024-12-11T02:29:14,008 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/86cf1151002b4f0a8672214131b8bc62 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/86cf1151002b4f0a8672214131b8bc62 2024-12-11T02:29:14,009 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d1dc45d8bc48451fb4c930ef5cd7034a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/d1dc45d8bc48451fb4c930ef5cd7034a 2024-12-11T02:29:14,009 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7bdb12544603403ebf827aa6e3df3006 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7bdb12544603403ebf827aa6e3df3006 2024-12-11T02:29:14,010 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/beed3ebf1c76433bab7da39fdcf8dc0e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/beed3ebf1c76433bab7da39fdcf8dc0e 2024-12-11T02:29:14,010 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6f6b94ee2f6749b2b432b392c9dabc6d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6f6b94ee2f6749b2b432b392c9dabc6d 2024-12-11T02:29:14,010 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/503971c17b8846c4ae1e0540420fc86d to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/503971c17b8846c4ae1e0540420fc86d 2024-12-11T02:29:14,010 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/e9a0911b82fc47b49a4fbe3575da669e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/e9a0911b82fc47b49a4fbe3575da669e 2024-12-11T02:29:14,010 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/0cedad35cc26498985f60d5a4750a33e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/0cedad35cc26498985f60d5a4750a33e 2024-12-11T02:29:14,010 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8e265d0647b74a4e80e3568be94cf120 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/8e265d0647b74a4e80e3568be94cf120 2024-12-11T02:29:14,010 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3d74ba2ea4fd4421a0bdd079c20b41fe to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/3d74ba2ea4fd4421a0bdd079c20b41fe 2024-12-11T02:29:14,011 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/9bfe2cf79c1d482b85232fca7865668e to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/9bfe2cf79c1d482b85232fca7865668e 2024-12-11T02:29:14,011 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6aecd284ba2149f6ad1e7c88c6106df0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/6aecd284ba2149f6ad1e7c88c6106df0 2024-12-11T02:29:14,011 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/12d9169a2fab4c409c9467c91ca51e3b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/12d9169a2fab4c409c9467c91ca51e3b 2024-12-11T02:29:14,011 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/24cb357534d3413e833c0378bcd59c90 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/24cb357534d3413e833c0378bcd59c90 2024-12-11T02:29:14,011 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/130dcf39bbeb4b51966598875fc3a006 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/130dcf39bbeb4b51966598875fc3a006 2024-12-11T02:29:14,012 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/09541e5fb85d415bb8d0d242ab4be95a to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/09541e5fb85d415bb8d0d242ab4be95a 2024-12-11T02:29:14,012 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/b63a8d93f96d41f8bc34b66471e9dec7 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/b63a8d93f96d41f8bc34b66471e9dec7 2024-12-11T02:29:14,012 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1ba2a8d59f144c1d9487f6e76cf6ca0b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1ba2a8d59f144c1d9487f6e76cf6ca0b 2024-12-11T02:29:14,012 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1673ee338eb8404c936e71c66ee108ab to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/1673ee338eb8404c936e71c66ee108ab 2024-12-11T02:29:14,012 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/f03886c2a3444c82aee9d1e3dd1d04e9 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/f03886c2a3444c82aee9d1e3dd1d04e9 2024-12-11T02:29:14,012 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7914ba5121fc4fcaa93a7d0defe83053 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/7914ba5121fc4fcaa93a7d0defe83053 2024-12-11T02:29:14,012 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/008f39407e8d46258f8c555a8e76d09c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/008f39407e8d46258f8c555a8e76d09c 2024-12-11T02:29:14,015 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/recovered.edits/420.seqid, newMaxSeqId=420, maxSeqId=4 2024-12-11T02:29:14,016 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b. 2024-12-11T02:29:14,016 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1635): Region close journal for ee5747d737c855bb22265bdc2d0c886b: 2024-12-11T02:29:14,017 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(170): Closed ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,017 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=ee5747d737c855bb22265bdc2d0c886b, regionState=CLOSED 2024-12-11T02:29:14,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-12-11T02:29:14,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseRegionProcedure ee5747d737c855bb22265bdc2d0c886b, server=5f57a24c5131,40311,1733883964600 in 1.8280 sec 2024-12-11T02:29:14,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-12-11T02:29:14,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ee5747d737c855bb22265bdc2d0c886b, UNASSIGN in 1.8310 sec 2024-12-11T02:29:14,021 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-12-11T02:29:14,021 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8330 sec 2024-12-11T02:29:14,022 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733884154021"}]},"ts":"1733884154021"} 2024-12-11T02:29:14,022 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T02:29:14,024 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T02:29:14,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8420 sec 2024-12-11T02:29:14,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T02:29:14,288 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-12-11T02:29:14,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T02:29:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:29:14,289 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:29:14,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-11T02:29:14,290 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=183, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:29:14,292 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,293 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C, FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/recovered.edits] 2024-12-11T02:29:14,296 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/dc40e8582322426bb02339b069c6f095 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/dc40e8582322426bb02339b069c6f095 2024-12-11T02:29:14,296 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/1f09a8859f6e4b3ba1674bfaa01c1a30 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/1f09a8859f6e4b3ba1674bfaa01c1a30 2024-12-11T02:29:14,296 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a973743ff3bf419d874c0bb9801cda65 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/A/a973743ff3bf419d874c0bb9801cda65 2024-12-11T02:29:14,298 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/db4bfe0d1b5148f9b0a31cc2cc6ace94 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/db4bfe0d1b5148f9b0a31cc2cc6ace94 2024-12-11T02:29:14,298 DEBUG [HFileArchiver-32 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/0e5f420cc0204efbae33add838496a9f to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/0e5f420cc0204efbae33add838496a9f 2024-12-11T02:29:14,298 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/fd70c94511454abaa3f88afb7d264cd0 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/B/fd70c94511454abaa3f88afb7d264cd0 2024-12-11T02:29:14,300 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/93ccb3b9784a4639a3a27c8501837714 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/93ccb3b9784a4639a3a27c8501837714 2024-12-11T02:29:14,300 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/2fc5f5fb0e9a459d8f3f1e110e7a7e6c to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/2fc5f5fb0e9a459d8f3f1e110e7a7e6c 2024-12-11T02:29:14,300 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/c560b5a111c247228a05ec74ede77280 to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/C/c560b5a111c247228a05ec74ede77280 2024-12-11T02:29:14,302 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/recovered.edits/420.seqid to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b/recovered.edits/420.seqid 2024-12-11T02:29:14,303 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/default/TestAcidGuarantees/ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,303 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T02:29:14,303 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T02:29:14,304 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-11T02:29:14,310 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121100ae9dd17ad04716ae9d9785875de94e_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121100ae9dd17ad04716ae9d9785875de94e_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,310 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110233689068c44623b8dc94e3c96c94b2_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110233689068c44623b8dc94e3c96c94b2_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,310 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121154a67ff19c9e4d2cadbc989a2d763e60_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121154a67ff19c9e4d2cadbc989a2d763e60_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,310 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211827cca7f53064487be203b4678ea4ce9_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211827cca7f53064487be203b4678ea4ce9_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,310 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121138a84c7994b24b9b832eb2b547defcfd_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121138a84c7994b24b9b832eb2b547defcfd_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,310 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114963bff6c224488e8f6fef6a4cdc6260_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114963bff6c224488e8f6fef6a4cdc6260_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,310 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116659b8003c644a8aa963ef84c6e182b0_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116659b8003c644a8aa963ef84c6e182b0_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,310 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110f9f6d618eba401f83981bc6c70193de_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110f9f6d618eba401f83981bc6c70193de_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,311 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118807ee7c9b144f288bba79336c113e1b_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118807ee7c9b144f288bba79336c113e1b_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,311 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118a2c7d3f12084c568b1122456715fcdf_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118a2c7d3f12084c568b1122456715fcdf_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,311 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121190ad049d42214c15adb45cb7b95df720_ee5747d737c855bb22265bdc2d0c886b to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121190ad049d42214c15adb45cb7b95df720_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,311 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121191989b15ad01438285944eb323e4a42b_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121191989b15ad01438285944eb323e4a42b_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,312 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211965a694c089545308c69d34d2ca1e310_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211965a694c089545308c69d34d2ca1e310_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,312 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121195c7fe085b6f41cea4b36fb3715f4811_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121195c7fe085b6f41cea4b36fb3715f4811_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,312 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211925863aa6c27465bbced81c6d0ab6f46_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211925863aa6c27465bbced81c6d0ab6f46_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,312 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119cd70c4b27594f60af55dab304031221_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119cd70c4b27594f60af55dab304031221_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,313 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119fbaa58a97834891986e22c126547ba4_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119fbaa58a97834891986e22c126547ba4_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,313 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211aba3abbd874f497dbaaf5a34fbd8446f_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211aba3abbd874f497dbaaf5a34fbd8446f_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,313 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c65b835b70864aff9e2d37e1d7b356aa_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c65b835b70864aff9e2d37e1d7b356aa_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,313 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211d84df168a38144bf91cb68a15c430a1a_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211d84df168a38144bf91cb68a15c430a1a_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,313 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211cf7a075897b44fd19157639effc346b9_ee5747d737c855bb22265bdc2d0c886b to hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211cf7a075897b44fd19157639effc346b9_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,313 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e733dd8a54b84337a793571337c0749c_ee5747d737c855bb22265bdc2d0c886b to 
hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e733dd8a54b84337a793571337c0749c_ee5747d737c855bb22265bdc2d0c886b 2024-12-11T02:29:14,314 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T02:29:14,316 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=183, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:29:14,318 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T02:29:14,320 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T02:29:14,321 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=183, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:29:14,321 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-11T02:29:14,321 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733884154321"}]},"ts":"9223372036854775807"} 2024-12-11T02:29:14,322 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T02:29:14,322 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ee5747d737c855bb22265bdc2d0c886b, NAME => 'TestAcidGuarantees,,1733884119480.ee5747d737c855bb22265bdc2d0c886b.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T02:29:14,322 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-11T02:29:14,323 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733884154323"}]},"ts":"9223372036854775807"} 2024-12-11T02:29:14,324 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T02:29:14,325 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=183, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T02:29:14,326 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 37 msec 2024-12-11T02:29:14,351 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-11T02:29:14,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40407 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-11T02:29:14,391 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-12-11T02:29:14,401 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244 (was 244), OpenFileDescriptor=447 (was 447), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=349 (was 361), ProcessCount=11 (was 11), AvailableMemoryMB=4252 (was 4281) 2024-12-11T02:29:14,401 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-11T02:29:14,401 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T02:29:14,401 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:63149 2024-12-11T02:29:14,401 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:14,401 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T02:29:14,401 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=67080707, stopped=false 2024-12-11T02:29:14,401 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=5f57a24c5131,40407,1733883963836 2024-12-11T02:29:14,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T02:29:14,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T02:29:14,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:29:14,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:29:14,403 INFO [Time-limited test 
{}] procedure2.ProcedureExecutor(700): Stopping 2024-12-11T02:29:14,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:14,404 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T02:29:14,404 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5f57a24c5131,40311,1733883964600' ***** 2024-12-11T02:29:14,404 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-11T02:29:14,404 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T02:29:14,404 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-11T02:29:14,404 INFO [RS:0;5f57a24c5131:40311 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T02:29:14,404 INFO [RS:0;5f57a24c5131:40311 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T02:29:14,404 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-11T02:29:14,404 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(3579): Received CLOSE for 5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:29:14,405 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1224): stopping server 5f57a24c5131,40311,1733883964600 2024-12-11T02:29:14,405 DEBUG [RS:0;5f57a24c5131:40311 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:14,405 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T02:29:14,405 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T02:29:14,405 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T02:29:14,405 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-11T02:29:14,405 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 5519ba8b50773a902ba9dca0bed2059c, disabling compactions & flushes 2024-12-11T02:29:14,405 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-11T02:29:14,405 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1603): Online Regions={5519ba8b50773a902ba9dca0bed2059c=hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c., 1588230740=hbase:meta,,1.1588230740} 2024-12-11T02:29:14,405 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 2024-12-11T02:29:14,405 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 
2024-12-11T02:29:14,405 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. after waiting 0 ms 2024-12-11T02:29:14,406 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 2024-12-11T02:29:14,406 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 5519ba8b50773a902ba9dca0bed2059c 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-11T02:29:14,406 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-11T02:29:14,406 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-11T02:29:14,406 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-11T02:29:14,406 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T02:29:14,406 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T02:29:14,406 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-11T02:29:14,409 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:29:14,410 INFO [regionserver/5f57a24c5131:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T02:29:14,427 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/namespace/5519ba8b50773a902ba9dca0bed2059c/.tmp/info/a9ce846c4d0a4dd5a61b1312352a43db is 45, key is default/info:d/1733883969159/Put/seqid=0 2024-12-11T02:29:14,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742571_1747 (size=5037) 2024-12-11T02:29:14,434 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/.tmp/info/34d57f9308b8459782d401f323fc177b is 143, key is hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c./info:regioninfo/1733883969049/Put/seqid=0 2024-12-11T02:29:14,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742572_1748 (size=7725) 2024-12-11T02:29:14,609 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:29:14,810 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1629): 
Waiting on 1588230740, 5519ba8b50773a902ba9dca0bed2059c 2024-12-11T02:29:14,818 INFO [regionserver/5f57a24c5131:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T02:29:14,818 INFO [regionserver/5f57a24c5131:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T02:29:14,831 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/namespace/5519ba8b50773a902ba9dca0bed2059c/.tmp/info/a9ce846c4d0a4dd5a61b1312352a43db 2024-12-11T02:29:14,834 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/namespace/5519ba8b50773a902ba9dca0bed2059c/.tmp/info/a9ce846c4d0a4dd5a61b1312352a43db as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/namespace/5519ba8b50773a902ba9dca0bed2059c/info/a9ce846c4d0a4dd5a61b1312352a43db 2024-12-11T02:29:14,837 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/namespace/5519ba8b50773a902ba9dca0bed2059c/info/a9ce846c4d0a4dd5a61b1312352a43db, entries=2, sequenceid=6, filesize=4.9 K 2024-12-11T02:29:14,837 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/.tmp/info/34d57f9308b8459782d401f323fc177b 2024-12-11T02:29:14,837 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 5519ba8b50773a902ba9dca0bed2059c in 431ms, sequenceid=6, compaction requested=false 2024-12-11T02:29:14,841 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/namespace/5519ba8b50773a902ba9dca0bed2059c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T02:29:14,841 INFO [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 2024-12-11T02:29:14,841 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 5519ba8b50773a902ba9dca0bed2059c: 2024-12-11T02:29:14,841 DEBUG [RS_CLOSE_REGION-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733883967802.5519ba8b50773a902ba9dca0bed2059c. 
2024-12-11T02:29:14,855 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/.tmp/rep_barrier/7dac8f1afbec4500a3b91187057ef95f is 102, key is TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956./rep_barrier:/1733883996673/DeleteFamily/seqid=0 2024-12-11T02:29:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742573_1749 (size=6025) 2024-12-11T02:29:15,010 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-11T02:29:15,210 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-11T02:29:15,258 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/.tmp/rep_barrier/7dac8f1afbec4500a3b91187057ef95f 2024-12-11T02:29:15,276 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/.tmp/table/60691cc3d66b4d50b89e949fc9912255 is 96, key is TestAcidGuarantees,,1733883969362.422539d3733f091ff661b5e7e0fc5956./table:/1733883996673/DeleteFamily/seqid=0 2024-12-11T02:29:15,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742574_1750 (size=5942) 2024-12-11T02:29:15,410 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-11T02:29:15,411 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T02:29:15,411 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-11T02:29:15,611 DEBUG [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-11T02:29:15,679 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/.tmp/table/60691cc3d66b4d50b89e949fc9912255 2024-12-11T02:29:15,683 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/.tmp/info/34d57f9308b8459782d401f323fc177b as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/info/34d57f9308b8459782d401f323fc177b 2024-12-11T02:29:15,686 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/info/34d57f9308b8459782d401f323fc177b, entries=22, sequenceid=93, filesize=7.5 K 2024-12-11T02:29:15,686 DEBUG 
[RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/.tmp/rep_barrier/7dac8f1afbec4500a3b91187057ef95f as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/rep_barrier/7dac8f1afbec4500a3b91187057ef95f 2024-12-11T02:29:15,689 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/rep_barrier/7dac8f1afbec4500a3b91187057ef95f, entries=6, sequenceid=93, filesize=5.9 K 2024-12-11T02:29:15,689 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/.tmp/table/60691cc3d66b4d50b89e949fc9912255 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/table/60691cc3d66b4d50b89e949fc9912255 2024-12-11T02:29:15,692 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/table/60691cc3d66b4d50b89e949fc9912255, entries=9, sequenceid=93, filesize=5.8 K 2024-12-11T02:29:15,692 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1286ms, sequenceid=93, compaction requested=false 2024-12-11T02:29:15,696 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-11T02:29:15,696 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T02:29:15,696 INFO [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-11T02:29:15,696 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-11T02:29:15,697 DEBUG [RS_CLOSE_META-regionserver/5f57a24c5131:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T02:29:15,811 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1250): stopping server 5f57a24c5131,40311,1733883964600; all regions closed. 
2024-12-11T02:29:15,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741834_1010 (size=26050) 2024-12-11T02:29:15,817 DEBUG [RS:0;5f57a24c5131:40311 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/oldWALs 2024-12-11T02:29:15,817 INFO [RS:0;5f57a24c5131:40311 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5f57a24c5131%2C40311%2C1733883964600.meta:.meta(num 1733883967551) 2024-12-11T02:29:15,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741833_1009 (size=17228928) 2024-12-11T02:29:15,820 DEBUG [RS:0;5f57a24c5131:40311 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/oldWALs 2024-12-11T02:29:15,820 INFO [RS:0;5f57a24c5131:40311 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5f57a24c5131%2C40311%2C1733883964600:(num 1733883966990) 2024-12-11T02:29:15,820 DEBUG [RS:0;5f57a24c5131:40311 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:15,820 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T02:29:15,821 INFO [RS:0;5f57a24c5131:40311 {}] hbase.ChoreService(370): Chore service for: regionserver/5f57a24c5131:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T02:29:15,821 INFO [regionserver/5f57a24c5131:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-11T02:29:15,821 INFO [RS:0;5f57a24c5131:40311 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40311 2024-12-11T02:29:15,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5f57a24c5131,40311,1733883964600 2024-12-11T02:29:15,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T02:29:15,827 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5f57a24c5131,40311,1733883964600] 2024-12-11T02:29:15,827 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 5f57a24c5131,40311,1733883964600; numProcessing=1 2024-12-11T02:29:15,828 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/5f57a24c5131,40311,1733883964600 already deleted, retry=false 2024-12-11T02:29:15,828 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 5f57a24c5131,40311,1733883964600 expired; onlineServers=0 2024-12-11T02:29:15,828 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5f57a24c5131,40407,1733883963836' ***** 2024-12-11T02:29:15,828 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T02:29:15,829 DEBUG [M:0;5f57a24c5131:40407 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@238f856c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5f57a24c5131/172.17.0.2:0 2024-12-11T02:29:15,829 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HRegionServer(1224): stopping server 5f57a24c5131,40407,1733883963836 2024-12-11T02:29:15,829 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HRegionServer(1250): stopping server 5f57a24c5131,40407,1733883963836; all regions closed. 2024-12-11T02:29:15,829 DEBUG [M:0;5f57a24c5131:40407 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T02:29:15,829 DEBUG [M:0;5f57a24c5131:40407 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T02:29:15,829 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-11T02:29:15,829 DEBUG [M:0;5f57a24c5131:40407 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T02:29:15,829 DEBUG [master/5f57a24c5131:0:becomeActiveMaster-HFileCleaner.large.0-1733883966701 {}] cleaner.HFileCleaner(306): Exit Thread[master/5f57a24c5131:0:becomeActiveMaster-HFileCleaner.large.0-1733883966701,5,FailOnTimeoutGroup] 2024-12-11T02:29:15,829 DEBUG [master/5f57a24c5131:0:becomeActiveMaster-HFileCleaner.small.0-1733883966702 {}] cleaner.HFileCleaner(306): Exit Thread[master/5f57a24c5131:0:becomeActiveMaster-HFileCleaner.small.0-1733883966702,5,FailOnTimeoutGroup] 2024-12-11T02:29:15,829 INFO [M:0;5f57a24c5131:40407 {}] hbase.ChoreService(370): Chore service for: master/5f57a24c5131:0 had [] on shutdown 2024-12-11T02:29:15,830 DEBUG [M:0;5f57a24c5131:40407 {}] master.HMaster(1733): Stopping service threads 2024-12-11T02:29:15,830 INFO [M:0;5f57a24c5131:40407 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T02:29:15,830 ERROR [M:0;5f57a24c5131:40407 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:37113 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:37113,5,PEWorkerGroup] 2024-12-11T02:29:15,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T02:29:15,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T02:29:15,830 INFO [M:0;5f57a24c5131:40407 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T02:29:15,831 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T02:29:15,831 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-11T02:29:15,831 DEBUG [M:0;5f57a24c5131:40407 {}] zookeeper.ZKUtil(347): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T02:29:15,831 WARN [M:0;5f57a24c5131:40407 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T02:29:15,831 INFO [M:0;5f57a24c5131:40407 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-11T02:29:15,831 INFO [M:0;5f57a24c5131:40407 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T02:29:15,831 DEBUG [M:0;5f57a24c5131:40407 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T02:29:15,831 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T02:29:15,831 DEBUG [M:0;5f57a24c5131:40407 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T02:29:15,831 DEBUG [M:0;5f57a24c5131:40407 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T02:29:15,831 DEBUG [M:0;5f57a24c5131:40407 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T02:29:15,832 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=789.11 KB heapSize=971.67 KB 2024-12-11T02:29:15,846 DEBUG [M:0;5f57a24c5131:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bd6564f0ae6644a5a61b90176a63cd15 is 82, key is hbase:meta,,1/info:regioninfo/1733883967694/Put/seqid=0 2024-12-11T02:29:15,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742575_1751 (size=5672) 2024-12-11T02:29:15,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T02:29:15,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40311-0x1007ee55f5b0001, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T02:29:15,927 INFO [RS:0;5f57a24c5131:40311 {}] regionserver.HRegionServer(1307): Exiting; stopping=5f57a24c5131,40311,1733883964600; zookeeper connection closed. 
2024-12-11T02:29:15,927 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4a094fab {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4a094fab 2024-12-11T02:29:15,928 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-11T02:29:16,250 INFO [M:0;5f57a24c5131:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2253 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bd6564f0ae6644a5a61b90176a63cd15 2024-12-11T02:29:16,271 DEBUG [M:0;5f57a24c5131:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bee70745e8f3423991f4d5b4f0e45941 is 2285, key is \x00\x00\x00\x00\x00\x00\x00\x9E/proc:d/1733884122497/Put/seqid=0 2024-12-11T02:29:16,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742576_1752 (size=45823) 2024-12-11T02:29:16,676 INFO [M:0;5f57a24c5131:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=788.56 KB at sequenceid=2253 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bee70745e8f3423991f4d5b4f0e45941 2024-12-11T02:29:16,680 INFO [M:0;5f57a24c5131:40407 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bee70745e8f3423991f4d5b4f0e45941 2024-12-11T02:29:16,702 DEBUG [M:0;5f57a24c5131:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f22eabe420de42448efdc6a35f7c1b40 is 69, key is 5f57a24c5131,40311,1733883964600/rs:state/1733883966742/Put/seqid=0 2024-12-11T02:29:16,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073742577_1753 (size=5156) 2024-12-11T02:29:17,107 INFO [M:0;5f57a24c5131:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2253 (bloomFilter=true), to=hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f22eabe420de42448efdc6a35f7c1b40 2024-12-11T02:29:17,110 DEBUG [M:0;5f57a24c5131:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bd6564f0ae6644a5a61b90176a63cd15 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bd6564f0ae6644a5a61b90176a63cd15 2024-12-11T02:29:17,113 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bd6564f0ae6644a5a61b90176a63cd15, entries=8, sequenceid=2253, filesize=5.5 K 
2024-12-11T02:29:17,114 DEBUG [M:0;5f57a24c5131:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bee70745e8f3423991f4d5b4f0e45941 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bee70745e8f3423991f4d5b4f0e45941 2024-12-11T02:29:17,117 INFO [M:0;5f57a24c5131:40407 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bee70745e8f3423991f4d5b4f0e45941 2024-12-11T02:29:17,117 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bee70745e8f3423991f4d5b4f0e45941, entries=183, sequenceid=2253, filesize=44.7 K 2024-12-11T02:29:17,117 DEBUG [M:0;5f57a24c5131:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f22eabe420de42448efdc6a35f7c1b40 as hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f22eabe420de42448efdc6a35f7c1b40 2024-12-11T02:29:17,120 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37113/user/jenkins/test-data/08ce1d80-96a7-e700-03ec-f05bb66cfee6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f22eabe420de42448efdc6a35f7c1b40, entries=1, sequenceid=2253, filesize=5.0 K 2024-12-11T02:29:17,121 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HRegion(3040): Finished flush of dataSize ~789.11 KB/808050, heapSize ~971.38 KB/994688, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1290ms, sequenceid=2253, compaction requested=false 2024-12-11T02:29:17,122 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T02:29:17,122 DEBUG [M:0;5f57a24c5131:40407 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-11T02:29:17,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46759 is added to blk_1073741830_1006 (size=955750) 2024-12-11T02:29:17,124 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-11T02:29:17,124 INFO [M:0;5f57a24c5131:40407 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-11T02:29:17,124 INFO [M:0;5f57a24c5131:40407 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40407 2024-12-11T02:29:17,126 DEBUG [M:0;5f57a24c5131:40407 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/5f57a24c5131,40407,1733883963836 already deleted, retry=false 2024-12-11T02:29:17,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T02:29:17,228 INFO [M:0;5f57a24c5131:40407 {}] regionserver.HRegionServer(1307): Exiting; stopping=5f57a24c5131,40407,1733883963836; zookeeper connection closed. 
2024-12-11T02:29:17,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1007ee55f5b0000, quorum=127.0.0.1:63149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T02:29:17,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T02:29:17,236 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T02:29:17,236 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T02:29:17,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T02:29:17,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/hadoop.log.dir/,STOPPED} 2024-12-11T02:29:17,239 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-11T02:29:17,239 WARN [BP-442462323-172.17.0.2-1733883960905 heartbeating to localhost/127.0.0.1:37113 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T02:29:17,239 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T02:29:17,239 WARN [BP-442462323-172.17.0.2-1733883960905 heartbeating to localhost/127.0.0.1:37113 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-442462323-172.17.0.2-1733883960905 (Datanode Uuid c67fabea-812a-4cd3-8643-47d8f6db4f2d) service to localhost/127.0.0.1:37113 2024-12-11T02:29:17,242 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3/dfs/data/data1/current/BP-442462323-172.17.0.2-1733883960905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T02:29:17,242 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/cluster_816c3f64-c47c-d296-590e-ab0cf001f1f3/dfs/data/data2/current/BP-442462323-172.17.0.2-1733883960905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T02:29:17,243 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T02:29:17,252 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T02:29:17,253 INFO 
[Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T02:29:17,253 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T02:29:17,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T02:29:17,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f5b5e49b-9032-ea1c-d306-6bb80ab280b7/hadoop.log.dir/,STOPPED} 2024-12-11T02:29:17,273 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-11T02:29:17,426 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down